id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
53,810 | def find_diffraction_calibration(
patterns,calibration_guess,library_phases,lib_gen,size,max_excitation_error = 0.01, **kwargs
):
"""Finds the optimal diffraction calibration for a pattern or set of patterns by optimizing correlation scores.
Parameters
----------
patterns : hyperspy:Signal2D object
Diffration patterns to be iteratively matched to find maximum correlation scores. Should be of known phase.
calibration_guess : float
Inital value for the diffraction calibration in inverse Angstoms per pixel.
library_phases : diffsims:StructureLibrary Object
Dictionary of structures and associated orientations for which
electron diffraction is to be simulated. Used to create the DiffractionLibrary.
lib_gen : diffsims:DiffractionLibraryGenerator Object
Computes a library of electron diffraction patterns for specified atomic
structures and orientations. Used to create the DiffractionLibrary.
size : integer
How many different steps to test for the first two iterations.
max_excitation_error : float
Gets passed to get_diffraction_library
**kwargs to be passed to index_dataset_with_template_rotation
Returns
-------
mean_cal : float
Mean of calibrations found for each pattern.
full_corrlines : np.array of shape (size*2 + 20, 2 , number of patterns)
Gives the explicit correlation vs calibration values.
found_cals : np.array of shape (number of patterns)
List of optimal calibration values for each pattern.
"""
images = patterns
num_patterns = images.data.shape[0]
found_cals = np.zeros((num_patterns))
found_cals[:] = calibration_guess
full_corrlines = np.zeros((0,2,num_patterns))
stepsize = 0.01*calibration_guess
#first set of checks
corrlines = _calibration_iteration(images,calibration_guess,library_phases,lib_gen,stepsize,size,num_patterns,max_excitation_error,**kwargs)
full_corrlines = np.append(full_corrlines,corrlines,axis = 0)
#refined calibration checks
calibration_guess = full_corrlines[full_corrlines[:,1,:].argmax(axis = 0),0,0].mean()
corrlines = _calibration_iteration(images,calibration_guess,library_phases,lib_gen,stepsize,size,num_patterns,max_excitation_error,**kwargs)
full_corrlines = np.append(full_corrlines,corrlines,axis = 0)
#more refined calibration checks with smaller step
stepsize = 0.001*calibration_guess
size = 20
calibration_guess = full_corrlines[full_corrlines[:,1,:].argmax(axis = 0),0,0].mean()
corrlines = _calibration_iteration(images,calibration_guess,library_phases,lib_gen,stepsize,size,num_patterns,max_excitation_error,**kwargs)
full_corrlines = np.append(full_corrlines,corrlines,axis = 0)
found_cals = full_corrlines[full_corrlines[:,1,:].argmax(axis = 0),0,0]
mean_cal = found_cals.mean()
return mean_cal,full_corrlines, found_cals
| def find_diffraction_calibration(
patterns,calibration_guess,library_phases,lib_gen,size,max_excitation_error = 0.01, **kwargs
):
"""Finds the optimal diffraction calibration for a pattern or set of patterns by optimizing correlation scores.
Parameters
----------
patterns : hyperspy:Signal2D object
Diffration patterns to be iteratively matched to find maximum correlation scores. Should be of known phase.
calibration_guess : float
Inital value for the diffraction calibration in inverse Angstoms per pixel.
library_phases : diffsims:StructureLibrary Object
Dictionary of structures and associated orientations for which
electron diffraction is to be simulated. Used to create the DiffractionLibrary.
lib_gen : diffsims:DiffractionLibraryGenerator Object
Computes a library of electron diffraction patterns for specified atomic
structures and orientations. Used to create the DiffractionLibrary.
size : integer
How many different steps to test for the first two iterations.
max_excitation_error : float
Gets passed to get_diffraction_library
kwargs
Keyword arguments passed to :meth:`index_dataset_with_template_rotation`.
Returns
-------
mean_cal : float
Mean of calibrations found for each pattern.
full_corrlines : np.array of shape (size*2 + 20, 2 , number of patterns)
Gives the explicit correlation vs calibration values.
found_cals : np.array of shape (number of patterns)
List of optimal calibration values for each pattern.
"""
images = patterns
num_patterns = images.data.shape[0]
found_cals = np.zeros((num_patterns))
found_cals[:] = calibration_guess
full_corrlines = np.zeros((0,2,num_patterns))
stepsize = 0.01*calibration_guess
#first set of checks
corrlines = _calibration_iteration(images,calibration_guess,library_phases,lib_gen,stepsize,size,num_patterns,max_excitation_error,**kwargs)
full_corrlines = np.append(full_corrlines,corrlines,axis = 0)
#refined calibration checks
calibration_guess = full_corrlines[full_corrlines[:,1,:].argmax(axis = 0),0,0].mean()
corrlines = _calibration_iteration(images,calibration_guess,library_phases,lib_gen,stepsize,size,num_patterns,max_excitation_error,**kwargs)
full_corrlines = np.append(full_corrlines,corrlines,axis = 0)
#more refined calibration checks with smaller step
stepsize = 0.001*calibration_guess
size = 20
calibration_guess = full_corrlines[full_corrlines[:,1,:].argmax(axis = 0),0,0].mean()
corrlines = _calibration_iteration(images,calibration_guess,library_phases,lib_gen,stepsize,size,num_patterns,max_excitation_error,**kwargs)
full_corrlines = np.append(full_corrlines,corrlines,axis = 0)
found_cals = full_corrlines[full_corrlines[:,1,:].argmax(axis = 0),0,0]
mean_cal = found_cals.mean()
return mean_cal,full_corrlines, found_cals
|
28,635 | def generate_isc_table_file(file_name,
xray_structure,
indices):
xs = xray_structure.deep_copy_scatterers()
for sc in xs.scatterers():
sc.flags.set_use_fp_fdp(False)
isc = ext.isotropic_scatterer_contribution(
xs.scatterers(),
xs.scattering_type_registry())
with open(file_name, "w") as out:
out.write("Title: generated from isotropic AFF")
out.write("\nScatterers:")
for sc in xs.scatterers():
out.write(" %s" %sc.label)
out.write("\nSymm: expanded")
sg = xs.space_group()
ml = list(sg.smx())
out.write("\nData:")
for idx_ in indices:
d_star_sq = xs.unit_cell().d_star_sq(idx_)
isc.at_d_star_sq(d_star_sq)
for m in ml:
idx = [int(x) for x in (m.r() * idx_)]
out.write("\n%s %s %s" %(idx[0], idx[1], idx[2]))
for sci in xrange(xs.scatterers().size()):
val = isc.get(sci, idx_)
out.write(" %.6f,%.6f" %(val.real, val.imag))
| def generate_isc_table_file(file_name,
xray_structure,
indices):
xs = xray_structure.deep_copy_scatterers()
for sc in xs.scatterers():
sc.flags.set_use_fp_fdp(False)
isc = ext.isotropic_scatterer_contribution(
xs.scatterers(),
xs.scattering_type_registry())
with open(file_name, "w") as out:
out.write("Title: generated from isotropic AFF")
out.write("\nScatterers:")
for sc in xs.scatterers():
out.write(" %s" %sc.label)
out.write("\nSymm: expanded")
sg = xs.space_group()
ml = list(sg.smx())
out.write("\nData:")
for idx_ in indices:
d_star_sq = xs.unit_cell().d_star_sq(idx_)
isc.at_d_star_sq(d_star_sq)
for m in ml:
idx = [int(x) for x in (m.r() * idx_)]
out.write("\n%s %s %s" %(idx[0], idx[1], idx[2]))
for sci in range(xs.scatterers().size()):
val = isc.get(sci, idx_)
out.write(" %.6f,%.6f" %(val.real, val.imag))
|
31,658 | def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
| def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abnormal-security-get-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
50,171 | def validate_result(result):
def validate_list(data):
return not any(map(invalid_float, data))
def validate_dict(data, keys):
for k in keys:
if k not in data or invalid_float(data[k]):
return False
return True
def invalid_float(value):
return value is None or value == float("inf") or value == float("-inf")
if "dimensions" not in result or not validate_dict(result["dimensions"], dimensions):
return False
if "travel_dimensions" not in result or not validate_dict(
result["travel_dimensions"], travel_dimensions
):
return False
if "extrusion_length" not in result or not validate_list(result["extrusion_length"]):
return False
if "extrusion_volume" not in result or not validate_list(result["extrusion_volume"]):
return False
if "printing_area" not in result or not validate_dict(
result["printing_area"], printing_area
):
return False
if "travel_area" not in result or not validate_dict(
result["travel_area"], travel_area
):
return False
if "total_time" not in result or invalid_float(result["total_time"]):
return False
return True
| def validate_result(result):
def validate_list(data):
return not any(map(invalid_float, data))
def validate_dict(data, keys):
for k in keys:
if k not in data or invalid_float(data[k]):
return False
return True
def invalid_float(value):
return value is None or value == float("inf") or value == float("-inf")
if "dimensions" not in result or not validate_dict(result["dimensions"], dimensions):
return False
if "travel_dimensions" not in result or not validate_dict(
result["travel_dimensions"], travel_dimensions
):
return False
if "extrusion_length" not in result or not validate_list(result["extrusion_length"]):
return False
if "extrusion_volume" not in result or not validate_list(result["extrusion_volume"]):
return False
if "printing_area" not in result or not validate_dict(
result["printing_area"], printing_area
):
return False
if "travel_area" not in result or not validate_dict(
result["travel_area"], area
):
return False
if "total_time" not in result or invalid_float(result["total_time"]):
return False
return True
|
4,304 | def _read_nedf_eeg(filename: str):
"""
Read header info and EEG data from an .nedf file
Parameters
----------
filename : str
Path to the .nedf file.
Returns
-------
eeg : array, shape (n_samples, n_channels)
Unscaled EEG data
info : dict
Information from the file header
triggers : array, shape (n_annots, 2)
Start samples and values of each trigger
scale : float
Scaling factor for the EEG data
"""
info, dt = parse_nedf_header(filename)
# to quote the original matlab implementation:
# "binary data will always start at byte 5120"
binstart = 10240
with open(filename, mode='rb') as f:
f.seek(binstart, os.SEEK_SET)
data = np.fromfile(f, dtype=dt)
# convert uint8-triplet -> float32
eeg = data['data']['eeg'] @ [1 << 16, 1 << 8, 1.]
eeg = eeg.reshape((-1, info['nchan']))
# convert sign if necessary
eeg[eeg > (1 << 23)] -= 1 << 24
triggers = data['data']['trig'].flatten()
triggerind = triggers.nonzero()[0]
triggers = np.stack((triggerind, triggers[triggerind])).T
# scale channels accordingly (here: to volts)
scale = 2.4 / (6.0 * 8388607)
return eeg, info, triggers, scale
| def _read_nedf_eeg(filename: str):
"""
Read header info and EEG data from an .nedf file
Parameters
----------
filename : str
Path to the .nedf file.
Returns
-------
eeg : array, shape (n_samples, n_channels)
Unscaled EEG data
info : dict
Information from the file header
triggers : array, shape (n_annots, 2)
Start samples and values of each trigger.
scale : float
Scaling factor for the EEG data
"""
info, dt = parse_nedf_header(filename)
# to quote the original matlab implementation:
# "binary data will always start at byte 5120"
binstart = 10240
with open(filename, mode='rb') as f:
f.seek(binstart, os.SEEK_SET)
data = np.fromfile(f, dtype=dt)
# convert uint8-triplet -> float32
eeg = data['data']['eeg'] @ [1 << 16, 1 << 8, 1.]
eeg = eeg.reshape((-1, info['nchan']))
# convert sign if necessary
eeg[eeg > (1 << 23)] -= 1 << 24
triggers = data['data']['trig'].flatten()
triggerind = triggers.nonzero()[0]
triggers = np.stack((triggerind, triggers[triggerind])).T
# scale channels accordingly (here: to volts)
scale = 2.4 / (6.0 * 8388607)
return eeg, info, triggers, scale
|
28,282 | def guids_from_list_str(s: str) -> Optional[Tuple[str, ...]]:
"""
Get tuple of guids from a python/json string representation of a list.
Extracts the guids from a string representation of a list, tuple,
or set of guids or a single guid.
Args:
s: input string
Returns:
Extracted guids as a tuple of strings.
If a provided string does not match the format, `None` will be returned.
For an empty list/tuple/set or empty string an empty set is returned.
Examples:
>>> guids_from_str(
"['07fd7195-c51e-44d6-a085-fa8274cf00d6', \
'070d7195-c51e-44d6-a085-fa8274cf00d6']")
will return
('07fd7195-c51e-44d6-a085-fa8274cf00d6',
'070d7195-c51e-44d6-a085-fa8274cf00d6')
"""
parsed = (ast.parse(s, mode='eval')).body
if not isinstance(parsed, (ast.List, ast.Tuple, ast.Set)):
return None
if not all([isinstance(e, ast.Constant) for e in parsed.elts]):
return None
return tuple (v.value for v in parsed.elts)
| def guids_from_list_str(s: str) -> Optional[Tuple[str, ...]]:
"""
Get tuple of guids from a python/json string representation of a list.
Extracts the guids from a string representation of a list, tuple,
or set of guids or a single guid.
Args:
s: input string
Returns:
Extracted guids as a tuple of strings.
If a provided string does not match the format, `None` will be returned.
For an empty list/tuple/set or empty string an empty set is returned.
Examples:
>>> guids_from_str(
"['07fd7195-c51e-44d6-a085-fa8274cf00d6', \
'070d7195-c51e-44d6-a085-fa8274cf00d6']")
will return
('07fd7195-c51e-44d6-a085-fa8274cf00d6',
'070d7195-c51e-44d6-a085-fa8274cf00d6')
"""
parsed = (ast.literal_eval(s, mode='eval')).body
if not isinstance(parsed, (ast.List, ast.Tuple, ast.Set)):
return None
if not all([isinstance(e, ast.Constant) for e in parsed.elts]):
return None
return tuple (v.value for v in parsed.elts)
|
48,444 | def _get_packages(module, pip, chdir, env):
'''Return the installed packages as a string, one package per line, like:
black==20.8b1
filelock==3.0.12
jedi==0.17.2
'''
# Try 'pip list' command first.
command = '%s list --format=freeze' % pip
lang_env = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
# If there was an error (maybe pip version < 1.3) try 'pip freeze'.
if rc != 0:
command = '%s freeze' % pip
rc, out, err = module.run_command(command, cwd=chdir)
if rc != 0:
_fail(module, command, out, err)
# pip freeze does not list setuptools or pip in its output
# So we need to get those via a specialcase
for pkg in ('setuptools', 'pip'):
formatted_dep = _get_package_info(module, pkg, env)
if formatted_dep is not None:
out += '%s\n' % formatted_dep
# Clean output for pip warnings
out = "\n".join([line for line in out.split('\n')
if not line.startswith('You are using')
and not line.startswith('You should consider')])
return command, out, err
| def _get_packages(module, pip, chdir, env):
'''Return the installed packages as a string, one package per line, like:
black==20.8b1
filelock==3.0.12
jedi==0.17.2
'''
# Try 'pip list' command first.
command = '%s list --format=freeze' % pip
lang_env = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
# If there was an error (maybe pip version < 1.3) try 'pip freeze'.
if rc != 0:
command = '%s freeze' % pip
rc, out, err = module.run_command(command, cwd=chdir)
if rc != 0:
_fail(module, command, out, err)
# pip freeze does not list setuptools or pip in its output
# So we need to get those via a specialcase
for pkg in ('setuptools', 'pip'):
formatted_dep = _get_package_info(module, pkg, env)
if formatted_dep is not None:
out += '%s\n' % formatted_dep
# Clean output for pip warnings
out = "\n".join(
line for line in out.split('\n')
if not line.startswith(('You are using', 'You should consider'))
)
return command, out, err
|
58,684 | def test_read_mixed_training_data_file():
training_data_file = "data/test_mixed_yaml_training_data/training_data.yml"
reader = RasaYAMLReader()
with pytest.warns(None) as record:
reader.read(training_data_file)
assert not len(record)
| def test_read_mixed_training_data_file():
training_data_file = "data/test_mixed_yaml_training_data/training_data.yml"
reader = RasaYAMLReader()
with pytest.warns(None) as record:
reader.read(training_data_file)
assert not len(record)
|
10,305 | def replace_line(existing_line, new_line):
"""Replaces lines in /etc/locale.gen"""
try:
with open("/etc/locale.gen", "r") as f:
lines = [line.replace(existing_line, new_line) for line in f.readlines()]
except IOError:
pass
try:
with open("/etc/locale.gen", "w") as f:
f.write("".join(lines))
except IOError:
pass
| def replace_line(existing_line, new_line):
"""Replaces lines in /etc/locale.gen"""
try:
with open("/etc/locale.gen", "r") as f:
lines = [line.replace(existing_line, new_line) for line in f]
except IOError:
pass
try:
with open("/etc/locale.gen", "w") as f:
f.write("".join(lines))
except IOError:
pass
|
45,134 | def create_if_missing(
flag_name: str,
is_enabled: bool = False,
client_data: Optional[dict] = None,
bucketer: Optional[AbstractBucketer] = None,
conditions: Optional[Iterable[Condition]] = None,
client: FeatureFlagClient = None,
) -> Optional[FeatureFlag]:
"""
Create a feature flag if a flag matching the given name does not
already exist.
Args:
flag_name: the name of the feature flag
is_enabled: the initial enabled/disabled state of the flag if
this function creates it
client_data: arbitrary data that we should store with the flag
bucketer: an optional bucketer from the flipper.bucketing module, e.g.
PercentageBucketer, to use when determining if the flag
is enabled
conditions: an optional iterable of Conditions against which we will
check input data to determine if a flag is enabled
client: The FeatureFlagClient instance to use. Defaults to a client
configured to look at an in-memory feature store.
Returns:
FeatureFlag or None: Returns a created or existing FeatureFlag, or None
if feature flagging is disabled.
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return
if not client:
client = get_features_client()
# If the flag exists in the feature flag store, we'll consider the
# enabled state, bucketer, and conditions currently saved in the
# feature flag store as canonical.
if client.exists(flag_name):
return client.get(flag_name)
flag = client.create(flag_name, is_enabled=is_enabled, client_data=client_data)
if bucketer:
flag.set_bucketer(bucketer)
if conditions:
flag.set_conditions(conditions)
return flag
| def create_if_missing(
flag_name: str,
is_enabled: bool = False,
client_data: Optional[dict] = None,
bucketer: Optional[AbstractBucketer] = None,
conditions: Optional[Iterable[Condition]] = None,
client: FeatureFlagClient = None,
) -> Optional[FeatureFlag]:
"""
Create a feature flag if a flag matching the given name does not
already exist.
Args:
flag_name: the name of the feature flag
is_enabled: the initial enabled/disabled state of the flag if
this function creates it
client_data: arbitrary data that we should store with the flag
bucketer: an optional bucketer from the flipper.bucketing module, e.g.
PercentageBucketer, to use when determining if the flag
is enabled
conditions: an optional iterable of Conditions against which we will
check input data to determine if a flag is enabled
client: The `FeatureFlagClient` instance to use. Defaults to a client
configured to look at an in-memory feature store.
Returns:
FeatureFlag or None: Returns a created or existing FeatureFlag, or None
if feature flagging is disabled.
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return
if not client:
client = get_features_client()
# If the flag exists in the feature flag store, we'll consider the
# enabled state, bucketer, and conditions currently saved in the
# feature flag store as canonical.
if client.exists(flag_name):
return client.get(flag_name)
flag = client.create(flag_name, is_enabled=is_enabled, client_data=client_data)
if bucketer:
flag.set_bucketer(bucketer)
if conditions:
flag.set_conditions(conditions)
return flag
|
40,554 | def imagecopy(cmd, source_resource_group_name, source_object_name, target_location,
target_resource_group_name, temporary_resource_group_name='image-copy-rg',
source_type='image', cleanup=False, parallel_degree=-1, tags=None, target_name=None,
target_subscription=None, export_as_snapshot='false', timeout=3600):
if cleanup:
# If --cleanup is set, forbid using an existing temporary resource group name.
# It is dangerous to clean up an existing resource group.
cli_cmd = prepare_cli_command(['group', 'exists', '-n', temporary_resource_group_name],
output_as_json=False)
cmd_output = run_cli_command(cli_cmd)
if 'true' in cmd_output:
raise CLIError('Don\'t specify an existing resource group in --temporary-resource-group-name '
'when --cleanup is set')
# get the os disk id from source vm/image
logger.warning("Getting OS disk ID of the source VM/image")
cli_cmd = prepare_cli_command([source_type, 'show',
'--name', source_object_name,
'--resource-group', source_resource_group_name])
json_cmd_output = run_cli_command(cli_cmd, return_as_json=True)
if json_cmd_output['storageProfile']['dataDisks']:
logger.warning(
"Data disks in the source detected, but are ignored by this extension!")
source_os_disk_id = None
source_os_disk_type = None
try:
source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['managedDisk']['id']
if source_os_disk_id is None:
raise TypeError
source_os_disk_type = "DISK"
logger.debug("found %s: %s", source_os_disk_type, source_os_disk_id)
except TypeError:
try:
source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['blobUri']
if source_os_disk_id is None:
raise TypeError
source_os_disk_type = "BLOB"
logger.debug("found %s: %s", source_os_disk_type,
source_os_disk_id)
except TypeError:
try: # images created by e.g. image-copy extension
source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['snapshot']['id']
if source_os_disk_id is None:
raise TypeError
source_os_disk_type = "SNAPSHOT"
logger.debug("found %s: %s", source_os_disk_type,
source_os_disk_id)
except TypeError:
pass
if source_os_disk_type is None or source_os_disk_id is None:
logger.error(
'Unable to locate a supported OS disk type in the provided source object')
raise CLIError('Invalid OS Disk Source Type')
source_os_type = json_cmd_output['storageProfile']['osDisk']['osType']
logger.debug("source_os_disk_type: %s. source_os_disk_id: %s. source_os_type: %s",
source_os_disk_type, source_os_disk_id, source_os_type)
# create source snapshots
# TODO: skip creating another snapshot when the source is a snapshot
logger.warning("Creating source snapshot")
source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot'
snapshot_location = json_cmd_output['location']
if source_os_disk_type == "BLOB":
source_storage_account_id = get_storage_account_id_from_blob_path(cmd,
source_os_disk_id,
source_resource_group_name)
cli_cmd = prepare_cli_command(['snapshot', 'create',
'--name', source_os_disk_snapshot_name,
'--location', snapshot_location,
'--resource-group', source_resource_group_name,
'--source', source_os_disk_id,
'--source-storage-account-id', source_storage_account_id,
'--hyper-v-generation', json_cmd_output['hyperVGeneration']])
else:
cli_cmd = prepare_cli_command(['snapshot', 'create',
'--name', source_os_disk_snapshot_name,
'--location', snapshot_location,
'--resource-group', source_resource_group_name,
'--source', source_os_disk_id,
'--hyper-v-generation', json_cmd_output['hyperVGeneration']])
run_cli_command(cli_cmd)
# Get SAS URL for the snapshotName
logger.warning(
"Getting sas url for the source snapshot with timeout: %d seconds", timeout)
if timeout < 3600:
logger.error("Timeout should be greater than 3600 seconds")
raise CLIError('Invalid Timeout')
cli_cmd = prepare_cli_command(['snapshot', 'grant-access',
'--name', source_os_disk_snapshot_name,
'--resource-group', source_resource_group_name,
'--duration-in-seconds', str(timeout)])
json_output = run_cli_command(cli_cmd, return_as_json=True)
source_os_disk_snapshot_url = json_output['accessSas']
logger.debug("source os disk snapshot url: %s",
source_os_disk_snapshot_url)
# Start processing in the target locations
transient_resource_group_name = temporary_resource_group_name
# pick the first location for the temp group
transient_resource_group_location = target_location[0].strip()
create_resource_group(transient_resource_group_name,
transient_resource_group_location,
target_subscription)
target_locations_count = len(target_location)
logger.warning("Target location count: %s", target_locations_count)
create_resource_group(target_resource_group_name,
target_location[0].strip(),
target_subscription)
try:
# try to get a handle on arm's 409s
azure_pool_frequency = 5
if target_locations_count >= 5:
azure_pool_frequency = 15
elif target_locations_count >= 3:
azure_pool_frequency = 10
if (target_locations_count == 1) or (parallel_degree == 1):
# Going to copy to targets one-by-one
logger.debug("Starting sync process for all locations")
for location in target_location:
location = location.strip()
create_target_image(cmd, location, transient_resource_group_name, source_type,
source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,
source_os_type, target_resource_group_name, azure_pool_frequency,
tags, target_name, target_subscription, export_as_snapshot, timeout,
json_cmd_output['hyperVGeneration'])
else:
if parallel_degree == -1:
pool = Pool(target_locations_count)
else:
pool = Pool(min(parallel_degree, target_locations_count))
tasks = []
for location in target_location:
location = location.strip()
tasks.append((location, transient_resource_group_name, source_type,
source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,
source_os_type, target_resource_group_name, azure_pool_frequency,
tags, target_name, target_subscription, export_as_snapshot, timeout,
json_cmd_output['hyperVGeneration']))
logger.warning("Starting async process for all locations")
for task in tasks:
pool.apply_async(create_target_image, task)
pool.close()
pool.join()
except KeyboardInterrupt:
logger.warning('User cancelled the operation')
if cleanup:
logger.warning('To cleanup temporary resources look for ones tagged with "image-copy-extension". \n'
'You can use the following command: az resource list --tag created_by=image-copy-extension')
pool.terminate()
return
# Cleanup
if cleanup:
logger.warning('Deleting transient resources')
# Delete resource group
cli_cmd = prepare_cli_command(['group', 'delete', '--no-wait', '--yes',
'--name', transient_resource_group_name],
subscription=target_subscription)
run_cli_command(cli_cmd)
# Revoke sas for source snapshot
cli_cmd = prepare_cli_command(['snapshot', 'revoke-access',
'--name', source_os_disk_snapshot_name,
'--resource-group', source_resource_group_name])
run_cli_command(cli_cmd)
# Delete source snapshot
# TODO: skip this if source is snapshot and not creating a new one
cli_cmd = prepare_cli_command(['snapshot', 'delete',
'--name', source_os_disk_snapshot_name,
'--resource-group', source_resource_group_name])
run_cli_command(cli_cmd)
| def imagecopy(cmd, source_resource_group_name, source_object_name, target_location,
target_resource_group_name, temporary_resource_group_name='image-copy-rg',
source_type='image', cleanup=False, parallel_degree=-1, tags=None, target_name=None,
target_subscription=None, export_as_snapshot='false', timeout=3600):
if cleanup:
# If --cleanup is set, forbid using an existing temporary resource group name.
# It is dangerous to clean up an existing resource group.
cli_cmd = prepare_cli_command(['group', 'exists', '-n', temporary_resource_group_name],
output_as_json=False)
cmd_output = run_cli_command(cli_cmd)
if 'true' in cmd_output:
raise CLIError('Don\'t specify an existing resource group in --temporary-resource-group-name '
'when --cleanup is set')
# get the os disk id from source vm/image
logger.warning("Getting OS disk ID of the source VM/image")
cli_cmd = prepare_cli_command([source_type, 'show',
'--name', source_object_name,
'--resource-group', source_resource_group_name])
json_cmd_output = run_cli_command(cli_cmd, return_as_json=True)
if json_cmd_output['storageProfile']['dataDisks']:
logger.warning(
"Data disks in the source detected, but are ignored by this extension!")
source_os_disk_id = None
source_os_disk_type = None
try:
source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['managedDisk']['id']
if source_os_disk_id is None:
raise TypeError
source_os_disk_type = "DISK"
logger.debug("found %s: %s", source_os_disk_type, source_os_disk_id)
except TypeError:
try:
source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['blobUri']
if source_os_disk_id is None:
raise TypeError
source_os_disk_type = "BLOB"
logger.debug("found %s: %s", source_os_disk_type,
source_os_disk_id)
except TypeError:
try: # images created by e.g. image-copy extension
source_os_disk_id = json_cmd_output['storageProfile']['osDisk']['snapshot']['id']
if source_os_disk_id is None:
raise TypeError
source_os_disk_type = "SNAPSHOT"
logger.debug("found %s: %s", source_os_disk_type,
source_os_disk_id)
except TypeError:
pass
if source_os_disk_type is None or source_os_disk_id is None:
logger.error(
'Unable to locate a supported OS disk type in the provided source object')
raise CLIError('Invalid OS Disk Source Type')
source_os_type = json_cmd_output['storageProfile']['osDisk']['osType']
logger.debug("source_os_disk_type: %s. source_os_disk_id: %s. source_os_type: %s",
source_os_disk_type, source_os_disk_id, source_os_type)
# create source snapshots
# TODO: skip creating another snapshot when the source is a snapshot
logger.warning("Creating source snapshot")
source_os_disk_snapshot_name = source_object_name + '_os_disk_snapshot'
snapshot_location = json_cmd_output['location']
if source_os_disk_type == "BLOB":
source_storage_account_id = get_storage_account_id_from_blob_path(cmd,
source_os_disk_id,
source_resource_group_name)
cli_cmd = prepare_cli_command(['snapshot', 'create',
'--name', source_os_disk_snapshot_name,
'--location', snapshot_location,
'--resource-group', source_resource_group_name,
'--source', source_os_disk_id,
'--source-storage-account-id', source_storage_account_id,
'--hyper-v-generation', hyper_v_generation])
else:
cli_cmd = prepare_cli_command(['snapshot', 'create',
'--name', source_os_disk_snapshot_name,
'--location', snapshot_location,
'--resource-group', source_resource_group_name,
'--source', source_os_disk_id,
'--hyper-v-generation', json_cmd_output['hyperVGeneration']])
run_cli_command(cli_cmd)
# Get SAS URL for the snapshotName
logger.warning(
"Getting sas url for the source snapshot with timeout: %d seconds", timeout)
if timeout < 3600:
logger.error("Timeout should be greater than 3600 seconds")
raise CLIError('Invalid Timeout')
cli_cmd = prepare_cli_command(['snapshot', 'grant-access',
'--name', source_os_disk_snapshot_name,
'--resource-group', source_resource_group_name,
'--duration-in-seconds', str(timeout)])
json_output = run_cli_command(cli_cmd, return_as_json=True)
source_os_disk_snapshot_url = json_output['accessSas']
logger.debug("source os disk snapshot url: %s",
source_os_disk_snapshot_url)
# Start processing in the target locations
transient_resource_group_name = temporary_resource_group_name
# pick the first location for the temp group
transient_resource_group_location = target_location[0].strip()
create_resource_group(transient_resource_group_name,
transient_resource_group_location,
target_subscription)
target_locations_count = len(target_location)
logger.warning("Target location count: %s", target_locations_count)
create_resource_group(target_resource_group_name,
target_location[0].strip(),
target_subscription)
try:
# try to get a handle on arm's 409s
azure_pool_frequency = 5
if target_locations_count >= 5:
azure_pool_frequency = 15
elif target_locations_count >= 3:
azure_pool_frequency = 10
if (target_locations_count == 1) or (parallel_degree == 1):
# Going to copy to targets one-by-one
logger.debug("Starting sync process for all locations")
for location in target_location:
location = location.strip()
create_target_image(cmd, location, transient_resource_group_name, source_type,
source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,
source_os_type, target_resource_group_name, azure_pool_frequency,
tags, target_name, target_subscription, export_as_snapshot, timeout,
json_cmd_output['hyperVGeneration'])
else:
if parallel_degree == -1:
pool = Pool(target_locations_count)
else:
pool = Pool(min(parallel_degree, target_locations_count))
tasks = []
for location in target_location:
location = location.strip()
tasks.append((location, transient_resource_group_name, source_type,
source_object_name, source_os_disk_snapshot_name, source_os_disk_snapshot_url,
source_os_type, target_resource_group_name, azure_pool_frequency,
tags, target_name, target_subscription, export_as_snapshot, timeout,
json_cmd_output['hyperVGeneration']))
logger.warning("Starting async process for all locations")
for task in tasks:
pool.apply_async(create_target_image, task)
pool.close()
pool.join()
except KeyboardInterrupt:
logger.warning('User cancelled the operation')
if cleanup:
logger.warning('To cleanup temporary resources look for ones tagged with "image-copy-extension". \n'
'You can use the following command: az resource list --tag created_by=image-copy-extension')
pool.terminate()
return
# Cleanup
if cleanup:
logger.warning('Deleting transient resources')
# Delete resource group
cli_cmd = prepare_cli_command(['group', 'delete', '--no-wait', '--yes',
'--name', transient_resource_group_name],
subscription=target_subscription)
run_cli_command(cli_cmd)
# Revoke sas for source snapshot
cli_cmd = prepare_cli_command(['snapshot', 'revoke-access',
'--name', source_os_disk_snapshot_name,
'--resource-group', source_resource_group_name])
run_cli_command(cli_cmd)
# Delete source snapshot
# TODO: skip this if source is snapshot and not creating a new one
cli_cmd = prepare_cli_command(['snapshot', 'delete',
'--name', source_os_disk_snapshot_name,
'--resource-group', source_resource_group_name])
run_cli_command(cli_cmd)
|
38,562 | def pointset(
p: np.ndarray[Any, np.dtype[np.float64]], max_diag: bool = False
) -> np.ndarray[Any, np.dtype[np.float64]]:
"""Compute mutual distance between all points in a point set.
Parameters:
p (np.ndarray, 3xn): Points
max_diag (boolean, defaults to False): If True, the diagonal values will
are set to a value larger than any other distance for each point,
rather than 0.
Returns:
np.array (nxn): Distance between points.
"""
if p.ndim == 1 or p.ndim == 2 and p.shape[-1] == 1:
return np.zeros((1, 1))
# Use scipy spatial function to compute distances between points
d = scidist.cdist(p.T, p.T, "euclidean")
if max_diag:
# Get the maximum per row (could have used column, the distance matrix is
# symmetric), and add it to the diagonal
row_max = np.max(d, axis=1)
d += 2 * np.diag(row_max)
return d
| def pointset(
p: np.ndarray[Any, np.dtype[np.float64]], max_diag: bool = False
) -> np.ndarray[Any, np.dtype[np.float64]]:
"""Compute mutual distance between all points in a point set.
Parameters:
p (np.ndarray, 3xn): Points
max_diag (boolean, defaults to False): If True, the diagonal values will
to each point is set to twice the maximum of the distances for that point,
rather than 0.
Returns:
np.array (nxn): Distance between points.
"""
if p.ndim == 1 or p.ndim == 2 and p.shape[-1] == 1:
return np.zeros((1, 1))
# Use scipy spatial function to compute distances between points
d = scidist.cdist(p.T, p.T, "euclidean")
if max_diag:
# Get the maximum per row (could have used column, the distance matrix is
# symmetric), and add it to the diagonal
row_max = np.max(d, axis=1)
d += 2 * np.diag(row_max)
return d
|
17,731 | def base10_to_base62_alph_num(base10_No):
'''Converst base 10 to base 62 so pdb/psf files can add may more than
9999 atoms and 999 residues.'''
'''base10_No = the base-10 number that you want to convert to base-62)'''
base62_No = 62
base10_No = int(base10_No)
whole_no =1
remainder = changeDigit_base10_to_base62_alph_num(int(base10_No % base62_No))
base62_Values = str(remainder)
power =1
while whole_no != 0:
whole_no =int(base10_No / base62_No**power)
if whole_no == base62_No :
base62_Values = str(0)+base62_Values
elif (whole_no != 0) and (whole_no > base62_No) :
base62_Values = str(changeDigit_base10_to_base62_alph_num(int(whole_no % base62_No))) + base62_Values
elif (whole_no != 0) and (whole_no < base62_No):
base62_Values = str(changeDigit_base10_to_base62_alph_num(int(whole_no))) + base62_Values
power =power+1
return base62_Values
| def base10_to_base62_alph_num(base10_No):
'''Converts base 10 to base 62 so pdb/psf files can add may more than
9999 atoms and 999 residues.'''
'''base10_No = the base-10 number that you want to convert to base-62)'''
base62_No = 62
base10_No = int(base10_No)
whole_no =1
remainder = changeDigit_base10_to_base62_alph_num(int(base10_No % base62_No))
base62_Values = str(remainder)
power =1
while whole_no != 0:
whole_no =int(base10_No / base62_No**power)
if whole_no == base62_No :
base62_Values = str(0)+base62_Values
elif (whole_no != 0) and (whole_no > base62_No) :
base62_Values = str(changeDigit_base10_to_base62_alph_num(int(whole_no % base62_No))) + base62_Values
elif (whole_no != 0) and (whole_no < base62_No):
base62_Values = str(changeDigit_base10_to_base62_alph_num(int(whole_no))) + base62_Values
power =power+1
return base62_Values
|
3,261 | def discard_event(job, attachments):
"""
Refunds consumed quotas for an event and its attachments.
For the event and each dropped attachment, an outcome
FILTERED(discarded-hash) is emitted.
:param job: The job context container.
:param attachments: The full list of attachments to filter.
"""
project = job["event"].project
quotas.refund(
project,
key=job["project_key"],
timestamp=job["start_time"],
category=job["category"],
quantity=1,
)
track_outcome(
org_id=project.organization_id,
project_id=job["project_id"],
key_id=job["key_id"],
outcome=Outcome.FILTERED,
reason=FilterStatKeys.DISCARDED_HASH,
timestamp=to_datetime(job["start_time"]),
event_id=job["event"].event_id,
category=job["category"],
)
attachment_quantity = 0
for attachment in attachments:
# Quotas are counted with at least ``1`` for attachments.
attachment_quantity += attachment.size or 1
track_outcome(
org_id=project.organization_id,
project_id=job["project_id"],
key_id=job["key_id"],
outcome=Outcome.FILTERED,
reason=FilterStatKeys.DISCARDED_HASH,
timestamp=to_datetime(job["start_time"]),
event_id=job["event"].event_id,
category=DataCategory.ATTACHMENT,
quantity=attachment.size,
)
if attachment_quantity:
quotas.refund(
project,
key=job["project_key"],
timestamp=job["start_time"],
category=DataCategory.ATTACHMENT,
quantity=attachment_quantity,
)
metrics.incr(
"events.discarded",
skip_internal=True,
tags={"organization_id": project.organization_id, "platform": job["platform"]},
)
| def discard_event(job, attachments):
"""
Refunds consumed quotas for an event and its attachments.
For the event and each dropped attachment, an outcome
FILTERED(discarded-hash) is emitted.
:param job: The job context container.
:param attachments: The full list of attachments to filter.
"""
project = job["event"].project
quotas.refund(
project,
key=job["project_key"],
timestamp=job["start_time"],
category=job["category"],
quantity=1,
)
track_outcome(
org_id=project.organization_id,
project_id=job["project_id"],
key_id=job["key_id"],
outcome=Outcome.FILTERED,
reason=FilterStatKeys.DISCARDED_HASH,
timestamp=to_datetime(job["start_time"]),
event_id=job["event"].event_id,
category=job["category"],
)
attachment_quantity = 0
for attachment in attachments:
# Quotas are counted with at least ``1`` for attachments.
attachment_quantity += attachment.size or 1
track_outcome(
org_id=project.organization_id,
project_id=job["project_id"],
key_id=job["key_id"],
outcome=Outcome.FILTERED,
reason=FilterStatKeys.DISCARDED_HASH,
timestamp=to_datetime(job["start_time"]),
event_id=job["event"].event_id,
category=DataCategory.ATTACHMENT,
quantity=attachment.size,
)
if attachment_quantity:
quotas.refund(
project,
key=job["project_key"],
timestamp=job["start_time"],
category=DataCategory.ATTACHMENT,
quantity=attachment_quantity,
)
metrics.incr(
"events.discarded",
skip_internal=True,
tags={"platform": job["platform"]},
)
|
31,229 | def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""Get Connector Runs command.
Args:
client: Client which connects to api
Returns:
Human Readable
Entry Context
Raw Data
"""
connector_id = demisto.getArg("connector_id")
url_suffix = '/connectors/%s/connector_runs' % connector_id
human_readable = []
context: Dict[str, Any] = {}
connectors: List[Dict[str, Any]] = client.http_request(message='GET', suffix=url_suffix)
if connectors:
keys = [
"id", "start_time",
"end_time", "success",
"total_payload_count",
"processed_palyoad_count",
"failed_payload_count",
"processed_assets_count",
"assets_with_tags_reset_count",
"processed_scanner_vuln_count",
"created_scanner_vuln_count",
"closed_scanner_vuln_count",
"autoclosed_scanner_vuln_count",
"reopened_scanner_vuln_count",
"closed_vuln_count",
"autoclosed_vuln_count",
"reopened_vuln_count"
]
context_list = parse_response(connectors, keys, keys)
for connector in connectors:
curr_dict = {
"id": connector.get("id"),
"start_time": connector.get("start_time"),
"end_time": connector.get("end_time"),
"success": connector.get("success"),
"total_payload_count": connector.get("total_payload_count"),
"processed_payload_count": connector.get("total_payload_count"),
"failed_payload_count": connector.get("failed_payload_count"),
"processed_assets_count": connector.get("processed_assets_count"),
"assets_with_tags_reset_count": connector.get("assets_with_tags_reset_count"),
"processed_scanner_vuln_count": connector.get("processed_scanner_vuln_count"),
"updated_scanner_vuln_count": connector.get("updated_scanner_vuln_count"),
"created_scanner_vuln_count": connector.get("created_scanner_vuln_count"),
"closed_scanner_vuln_count": connector.get("closed_scanner_vuln_count"),
"autoclosed_scanner_vuln_count": connector.get("autoclosed_scanner_vuln_count"),
"reopened_scanner_vuln_count": connector.get("reopened_scanner_vuln_count"),
"closed_vuln_count": connector.get("closed_vuln_count"),
"autoclosed_vuln_count": connector.get("closed_vuln_count"),
"reopened_vuln_count": connector.get("reopened_vuln_count")
}
human_readable.append(curr_dict)
context = {
'Kenna.ConnectorRunsList(val.ID === obj.ID)': context_list
}
human_readable_markdown = tableToMarkdown('Kenna Connector Runs', human_readable, removeNull=True)
else:
human_readable_markdown = "no connectors in get response."
return human_readable_markdown, context, connectors
| def get_connector_runs(client: Client, *_) -> Tuple[str, Dict[str, Any], List[Dict[str, Any]]]:
"""Get Connector Runs command.
Args:
client: Client which connects to api
Returns:
Human Readable
Entry Context
Raw Data
"""
connector_id = demisto.getArg("connector_id")
url_suffix = f'/connectors/{connector_id}/connector_runs'
human_readable = []
context: Dict[str, Any] = {}
connectors: List[Dict[str, Any]] = client.http_request(message='GET', suffix=url_suffix)
if connectors:
keys = [
"id", "start_time",
"end_time", "success",
"total_payload_count",
"processed_palyoad_count",
"failed_payload_count",
"processed_assets_count",
"assets_with_tags_reset_count",
"processed_scanner_vuln_count",
"created_scanner_vuln_count",
"closed_scanner_vuln_count",
"autoclosed_scanner_vuln_count",
"reopened_scanner_vuln_count",
"closed_vuln_count",
"autoclosed_vuln_count",
"reopened_vuln_count"
]
context_list = parse_response(connectors, keys, keys)
for connector in connectors:
curr_dict = {
"id": connector.get("id"),
"start_time": connector.get("start_time"),
"end_time": connector.get("end_time"),
"success": connector.get("success"),
"total_payload_count": connector.get("total_payload_count"),
"processed_payload_count": connector.get("total_payload_count"),
"failed_payload_count": connector.get("failed_payload_count"),
"processed_assets_count": connector.get("processed_assets_count"),
"assets_with_tags_reset_count": connector.get("assets_with_tags_reset_count"),
"processed_scanner_vuln_count": connector.get("processed_scanner_vuln_count"),
"updated_scanner_vuln_count": connector.get("updated_scanner_vuln_count"),
"created_scanner_vuln_count": connector.get("created_scanner_vuln_count"),
"closed_scanner_vuln_count": connector.get("closed_scanner_vuln_count"),
"autoclosed_scanner_vuln_count": connector.get("autoclosed_scanner_vuln_count"),
"reopened_scanner_vuln_count": connector.get("reopened_scanner_vuln_count"),
"closed_vuln_count": connector.get("closed_vuln_count"),
"autoclosed_vuln_count": connector.get("closed_vuln_count"),
"reopened_vuln_count": connector.get("reopened_vuln_count")
}
human_readable.append(curr_dict)
context = {
'Kenna.ConnectorRunsList(val.ID === obj.ID)': context_list
}
human_readable_markdown = tableToMarkdown('Kenna Connector Runs', human_readable, removeNull=True)
else:
human_readable_markdown = "no connectors in get response."
return human_readable_markdown, context, connectors
|
33,007 | def __map_result_to_song_data(result: dict) -> dict:
artists = ", ".join(map(lambda a: a['name'], result['artists']))
video_id = result['videoId']
song_data = {
'name': result['title'],
'type': result['resultType'],
'artist': artists,
'length': __parse_duration(result.get('duration', None)),
'link': f'https://www.youtube.com/watch?v={video_id}',
'position': 0
}
if 'album' in result:
song_data['album'] = result['album']['name']
return song_data
| def __map_result_to_song_data(result: dict) -> dict:
artists = ", ".join(map(lambda a: a['name'], result['artists']))
video_id = result['videoId']
song_data = {
'name': result['title'],
'type': result['resultType'],
'artist': artists,
'length': __parse_duration(result.get('duration')),
'link': f'https://www.youtube.com/watch?v={video_id}',
'position': 0
}
if 'album' in result:
song_data['album'] = result['album']['name']
return song_data
|
34,287 | def test_train_status():
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
payload["force"] = True
ctx = multiprocessing.get_context("spawn")
# run a rasa server in one process
p0 = ctx.Process(target=subprocess.run, args=(["rasa", "run", "--enable-api"],))
p0.start()
server_ready = False
# wait until server is up before sending train request and status test loop
while not server_ready:
try:
server_ready = (
requests.get("http://localhost:5005/status").status_code == 200
)
except requests.exceptions.ConnectionError:
pass
time.sleep(1)
# use another process to hit the first server with a training request
results = Manager().dict()
p1 = ctx.Process(target=_send_train_request, args=(results, payload))
p1.start()
training_started = False
training_finished = False
# use our current process to query the status endpoint while the training is running
while not training_finished:
time.sleep(0.5)
# hit status endpoint with short timeout to ensure training doesn't block
status_resp = requests.get("http://localhost:5005/status", timeout=1)
assert status_resp.status_code == 200
if not training_started:
# make sure that we don't fail because we got status before training updated number of jobs
training_started = status_resp.json()["num_active_training_jobs"] == 1
else:
if results.get("train_response_code") is None:
assert status_resp.json()["num_active_training_jobs"] == 1
else:
# once the response code is in, training is done, status should return 0 again
assert results.get("train_response_code") == 200
training_finished = True
status_resp = requests.get("http://localhost:5005/status")
assert status_resp.json()["num_active_training_jobs"] == 0
p0.kill()
p1.join()
| def test_train_status():
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
payload["force"] = True
ctx = multiprocessing.get_context("spawn")
# run a rasa server in one process
p0 = ctx.Process(target=subprocess.run, args=(["rasa", "run", "--enable-api"],))
p0.start()
server_ready = False
# wait until server is up before sending train request and status test loop
while not server_ready:
try:
server_ready = (
requests.get("http://localhost:5005/status").status_code == 200
)
except requests.exceptions.ConnectionError:
pass
time.sleep(1)
# use another process to hit the first server with a training request
results = Manager().dict()
training_request = ctx.Process(target=_send_train_request, args=(results, payload))
p1.start()
training_started = False
training_finished = False
# use our current process to query the status endpoint while the training is running
while not training_finished:
time.sleep(0.5)
# hit status endpoint with short timeout to ensure training doesn't block
status_resp = requests.get("http://localhost:5005/status", timeout=1)
assert status_resp.status_code == 200
if not training_started:
# make sure that we don't fail because we got status before training updated number of jobs
training_started = status_resp.json()["num_active_training_jobs"] == 1
else:
if results.get("train_response_code") is None:
assert status_resp.json()["num_active_training_jobs"] == 1
else:
# once the response code is in, training is done, status should return 0 again
assert results.get("train_response_code") == 200
training_finished = True
status_resp = requests.get("http://localhost:5005/status")
assert status_resp.json()["num_active_training_jobs"] == 0
p0.kill()
p1.join()
|
58,043 | def get_blocklist(client: Client, args: dict) -> CommandResults:
"""
Retrieve the blocklist (SentinelOne Term: Blacklist)
"""
tenant_str = args.get('global', 'false')
tenant = tenant_str.lower() == 'true'
sort_by = "updatedAt"
sort_order = "desc"
offset = int(args.get('offset', "0"))
limit = int(args.get('limit', "100"))
group_ids = args.get('group_ids', None)
site_ids = args.get('site_ids', None)
account_ids = args.get('account_ids', None)
contents = []
block_list = client.get_blocklist_request(tenant=tenant, group_ids=group_ids, site_ids=site_ids,
account_ids=account_ids, skip=offset, limit=limit,
sort_by=sort_by, sort_order=sort_order)
for block in block_list:
contents.append({
'CreatedAt': block.get('createdAt'),
'Description': block.get('description'),
'ID': block.get('id'),
'OSType': block.get('osType'),
'ScopeName': block.get('scopeName'),
'ScopePath': block.get('scopePath'),
'Source': block.get('source'),
'Type': block.get('type'),
'UpdatedAt': block.get('updatedAt'),
'UserId': block.get('userId'),
'Value': block.get('value')
})
return CommandResults(
readable_output=tableToMarkdown('SentinelOne Blocklist', contents, removeNull=True),
outputs_prefix='SentinelOne.Blocklist',
outputs_key_field='Value',
outputs=contents,
raw_response=block_list)
| def get_blocklist(client: Client, args: dict) -> CommandResults:
"""
Retrieve the blocklist (SentinelOne Term: Blacklist)
"""
tenant_str = args.get('global', 'false')
tenant = tenant_str.lower() == 'true'
sort_by = "updatedAt"
sort_order = "desc"
offset = arg_to_number(args.get('offset', "0"))
limit = arg_to_number(args.get('limit', "100"))
group_ids = args.get('group_ids', None)
site_ids = args.get('site_ids', None)
account_ids = args.get('account_ids', None)
contents = []
block_list = client.get_blocklist_request(tenant=tenant, group_ids=group_ids, site_ids=site_ids,
account_ids=account_ids, skip=offset, limit=limit,
sort_by=sort_by, sort_order=sort_order)
for block in block_list:
contents.append({
'CreatedAt': block.get('createdAt'),
'Description': block.get('description'),
'ID': block.get('id'),
'OSType': block.get('osType'),
'ScopeName': block.get('scopeName'),
'ScopePath': block.get('scopePath'),
'Source': block.get('source'),
'Type': block.get('type'),
'UpdatedAt': block.get('updatedAt'),
'UserId': block.get('userId'),
'Value': block.get('value')
})
return CommandResults(
readable_output=tableToMarkdown('SentinelOne Blocklist', contents, removeNull=True),
outputs_prefix='SentinelOne.Blocklist',
outputs_key_field='Value',
outputs=contents,
raw_response=block_list)
|
10,336 | def main():
protocols = [
'http',
'https',
'email',
'email_json',
'sms',
'sqs',
'application',
'lambda',
]
argument_spec = dict(
msg=dict(required=True, aliases=['default']),
subject=dict(),
topic=dict(required=True),
message_attributes=dict(type='dict'),
message_structure=dict(choices=['json', 'string'], default='json'),
)
for p in protocols:
argument_spec[p] = dict()
module = AnsibleAWSModule(argument_spec=argument_spec)
sns_kwargs = dict(
Message=module.params['msg'],
MessageStructure=module.params['message_structure']
)
if module.params['subject']:
sns_kwargs.update({"Subject": module.params['subject']})
if module.params['message_attributes']:
if module.params['message_structure'] != 'string':
module.fail_json(msg='message_attributes is only supported when the message_structure is "string".')
sns_kwargs['MessageAttributes'] = module.params['message_attributes']
dict_msg = {
'default': sns_kwargs['Message']
}
for p in protocols:
if module.params[p]:
if sns_kwargs['MessageStructure'] != 'json':
module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".')
dict_msg[p.replace('_', '-')] = module.params[p]
client = module.client('sns')
topic = module.params['topic']
if ':' in topic:
# Short names can't contain ':' so we'll assume this is the full ARN
sns_kwargs['TopicArn'] = topic
else:
sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic)
if not sns_kwargs['TopicArn']:
module.fail_json(msg='Could not find topic: {0}'.format(topic))
if sns_kwargs['MessageStructure'] == 'json':
sns_kwargs['Message'] = json.dumps(dict_msg)
try:
result = client.publish(**sns_kwargs)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to publish message')
module.exit_json(msg='OK', message_id=result['MessageId'])
| def main():
protocols = [
'http',
'https',
'email',
'email_json',
'sms',
'sqs',
'application',
'lambda',
]
argument_spec = dict(
msg=dict(required=True, aliases=['default']),
subject=dict(),
topic=dict(required=True),
message_attributes=dict(type='dict'),
message_structure=dict(choices=['json', 'string'], default='json'),
)
for p in protocols:
argument_spec[p] = dict()
module = AnsibleAWSModule(argument_spec=argument_spec)
sns_kwargs = dict(
Message=module.params['msg'],
MessageStructure=module.params['message_structure']
)
if module.params['subject']:
sns_kwargs['Subject'] = module.params['subject']
if module.params['message_attributes']:
if module.params['message_structure'] != 'string':
module.fail_json(msg='message_attributes is only supported when the message_structure is "string".')
sns_kwargs['MessageAttributes'] = module.params['message_attributes']
dict_msg = {
'default': sns_kwargs['Message']
}
for p in protocols:
if module.params[p]:
if sns_kwargs['MessageStructure'] != 'json':
module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".')
dict_msg[p.replace('_', '-')] = module.params[p]
client = module.client('sns')
topic = module.params['topic']
if ':' in topic:
# Short names can't contain ':' so we'll assume this is the full ARN
sns_kwargs['TopicArn'] = topic
else:
sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic)
if not sns_kwargs['TopicArn']:
module.fail_json(msg='Could not find topic: {0}'.format(topic))
if sns_kwargs['MessageStructure'] == 'json':
sns_kwargs['Message'] = json.dumps(dict_msg)
try:
result = client.publish(**sns_kwargs)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to publish message')
module.exit_json(msg='OK', message_id=result['MessageId'])
|
22,646 | def get_facts(
state,
name_or_cls,
args=None,
kwargs=None,
ensure_hosts=None,
apply_failed_hosts=True,
):
'''
Get a single fact for all hosts in the state.
'''
# TODO: tidy up the whole executor argument handling here!
global_kwarg_overrides = {}
if kwargs:
global_kwarg_overrides.update({
key: kwargs.pop(key)
for key in get_executor_kwarg_keys()
if key in kwargs
})
if isclass(name_or_cls) and issubclass(name_or_cls, (FactBase, ShortFactBase)):
fact = name_or_cls()
name = fact.name
else:
fact = get_fact_class(name_or_cls)()
name = name_or_cls
if isinstance(fact, ShortFactBase):
return get_short_facts(state, fact, args=args, ensure_hosts=ensure_hosts)
args = args or ()
kwargs = kwargs or {}
if args or kwargs:
# Merges args & kwargs into a single kwargs dictionary
kwargs = getcallargs(fact.command, *args, **kwargs)
logger.debug('Getting fact: {0} ({1}) (ensure_hosts: {2})'.format(
name, get_kwargs_str(kwargs), ensure_hosts,
))
# Apply args or defaults
sudo = state.config.SUDO
sudo_user = state.config.SUDO_USER
su_user = state.config.SU_USER
ignore_errors = state.config.IGNORE_ERRORS
shell_executable = state.config.SHELL
use_sudo_password = state.config.USE_SUDO_PASSWORD
env = state.config.ENV
success_exit_codes = [0]
# Facts can override the shell (winrm powershell vs cmd support)
if fact.shell_executable:
shell_executable = fact.shell_executable
# Timeout for operations !== timeout for connect (config.CONNECT_TIMEOUT)
timeout = None
# If inside an operation, fetch global arguments
current_global_kwargs = state.current_op_global_kwargs or {}
# Allow `Host.get_fact` calls to explicitly override these
current_global_kwargs.update(global_kwarg_overrides)
if current_global_kwargs:
sudo = current_global_kwargs.get('sudo', sudo)
sudo_user = current_global_kwargs.get('sudo_user', sudo_user)
use_sudo_password = current_global_kwargs.get('use_sudo_password', use_sudo_password)
su_user = current_global_kwargs.get('su_user', su_user)
ignore_errors = current_global_kwargs.get('ignore_errors', ignore_errors)
timeout = current_global_kwargs.get('timeout', timeout)
env = current_global_kwargs.get('env', env)
success_exit_codes = current_global_kwargs.get('success_exit_codes', [0])
# Make a hash which keeps facts unique - but usable cross-deploy/threads.
# Locks are used to maintain order.
fact_hash = make_hash((name, kwargs, sudo, sudo_user, su_user, ignore_errors, env))
# Already got this fact? Unlock and return them
current_facts = state.facts.get(fact_hash, {})
if current_facts:
if not ensure_hosts or all(
host in current_facts for host in ensure_hosts
):
return current_facts
with FACT_LOCK:
# Add any hosts we must have, whether considered in the inventory or not
# (these hosts might be outside the --limit or current op limit_hosts).
hosts = set(state.inventory.iter_active_hosts())
if ensure_hosts:
hosts.update(ensure_hosts)
# Execute the command for each state inventory in a greenlet
greenlet_to_host = {}
for host in hosts:
if host in current_facts:
continue
# Generate actual arguments by passing strings as jinja2 templates
host_kwargs = {
key: get_arg_value(state, host, arg)
for key, arg in kwargs.items()
}
command = _make_command(fact.command, host_kwargs)
requires_command = _make_command(fact.requires_command, host_kwargs)
if requires_command:
command = StringCommand(
# Command doesn't exist, return 0 *or* run & return fact command
'!', 'command', '-v', requires_command, '>/dev/null', '||', command,
)
greenlet = state.fact_pool.spawn(
host.run_shell_command,
command,
sudo=sudo,
sudo_user=sudo_user,
use_sudo_password=use_sudo_password,
success_exit_codes=success_exit_codes,
su_user=su_user,
timeout=timeout,
env=env,
shell_executable=shell_executable,
print_output=state.print_fact_output,
print_input=state.print_fact_input,
return_combined_output=True,
)
greenlet_to_host[greenlet] = host
# Wait for all the commands to execute
progress_prefix = 'fact: {0} ({1})'.format(name, get_kwargs_str(kwargs))
with progress_spinner(
greenlet_to_host.values(),
prefix_message=progress_prefix,
) as progress:
for greenlet in gevent.iwait(greenlet_to_host.keys()):
host = greenlet_to_host[greenlet]
progress(host)
hostname_facts = {}
failed_hosts = set()
# Collect the facts and any failures
for greenlet, host in six.iteritems(greenlet_to_host):
status = False
stdout = []
try:
status, combined_output_lines = greenlet.get()
except (timeout_error, socket_error, SSHException) as e:
failed_hosts.add(host)
log_host_command_error(
host, e,
timeout=timeout,
)
stdout, stderr = split_combined_output(combined_output_lines)
data = fact.default()
if status:
if stdout:
data = fact.process(stdout)
elif stderr:
first_line = stderr[0]
if (
sudo_user
and re.match(SUDO_REGEX, first_line)
):
status = True
if (
su_user
and any(re.match(regex, first_line) for regex in SU_REGEXES)
):
status = True
if not status:
failed_hosts.add(host)
if not state.print_fact_output:
print_host_combined_output(host, combined_output_lines)
log_error_or_warning(host, ignore_errors, description=(
'could not load fact: {0} {1}'
).format(name, get_kwargs_str(kwargs)))
hostname_facts[host] = data
log = 'Loaded fact {0}{1}'.format(
click.style(name, bold=True),
' ({0})'.format(get_kwargs_str(kwargs)) if kwargs else '',
)
if state.print_fact_info:
logger.info(log)
else:
logger.debug(log)
# Check we've not failed
if not ignore_errors and apply_failed_hosts:
state.fail_hosts(failed_hosts)
# Assign the facts
state.facts.setdefault(fact_hash, {}).update(hostname_facts)
return state.facts[fact_hash]
| def get_facts(
state,
name_or_cls,
args=None,
kwargs=None,
ensure_hosts=None,
apply_failed_hosts=True,
):
'''
Get a single fact for all hosts in the state.
'''
# TODO: tidy up the whole executor argument handling here!
global_kwarg_overrides = {}
if kwargs:
global_kwarg_overrides.update({
key: kwargs.pop(key)
for key in get_executor_kwarg_keys()
if key in kwargs
})
if isclass(name_or_cls) and issubclass(name_or_cls, (FactBase, ShortFactBase)):
fact = name_or_cls()
name = fact.name
else:
fact = get_fact_class(name_or_cls)()
name = name_or_cls
if isinstance(fact, ShortFactBase):
return get_short_facts(state, fact, args=args, ensure_hosts=ensure_hosts)
args = args or ()
kwargs = kwargs or {}
if args or kwargs:
# Merges args & kwargs into a single kwargs dictionary
kwargs = getcallargs(fact.command, *args, **kwargs)
logger.debug('Getting fact: {0} ({1}) (ensure_hosts: {2})'.format(
name, get_kwargs_str(kwargs), ensure_hosts,
))
# Apply args or defaults
sudo = state.config.SUDO
sudo_user = state.config.SUDO_USER
su_user = state.config.SU_USER
ignore_errors = state.config.IGNORE_ERRORS
shell_executable = state.config.SHELL
use_sudo_password = state.config.USE_SUDO_PASSWORD
env = state.config.ENV
success_exit_codes = [0]
# Facts can override the shell (winrm powershell vs cmd support)
if fact.shell_executable:
shell_executable = fact.shell_executable
# Timeout for operations !== timeout for connect (config.CONNECT_TIMEOUT)
timeout = None
# If inside an operation, fetch global arguments
current_global_kwargs = state.current_op_global_kwargs or {}
# Allow `Host.get_fact` calls to explicitly override these
current_global_kwargs.update(global_kwarg_overrides)
if current_global_kwargs:
sudo = current_global_kwargs.get('sudo', sudo)
sudo_user = current_global_kwargs.get('sudo_user', sudo_user)
use_sudo_password = current_global_kwargs.get('use_sudo_password', use_sudo_password)
su_user = current_global_kwargs.get('su_user', su_user)
ignore_errors = current_global_kwargs.get('ignore_errors', ignore_errors)
timeout = current_global_kwargs.get('timeout', timeout)
env = current_global_kwargs.get('env', env)
success_exit_codes = current_global_kwargs.get('success_exit_codes', success_exit_codes)
# Make a hash which keeps facts unique - but usable cross-deploy/threads.
# Locks are used to maintain order.
fact_hash = make_hash((name, kwargs, sudo, sudo_user, su_user, ignore_errors, env))
# Already got this fact? Unlock and return them
current_facts = state.facts.get(fact_hash, {})
if current_facts:
if not ensure_hosts or all(
host in current_facts for host in ensure_hosts
):
return current_facts
with FACT_LOCK:
# Add any hosts we must have, whether considered in the inventory or not
# (these hosts might be outside the --limit or current op limit_hosts).
hosts = set(state.inventory.iter_active_hosts())
if ensure_hosts:
hosts.update(ensure_hosts)
# Execute the command for each state inventory in a greenlet
greenlet_to_host = {}
for host in hosts:
if host in current_facts:
continue
# Generate actual arguments by passing strings as jinja2 templates
host_kwargs = {
key: get_arg_value(state, host, arg)
for key, arg in kwargs.items()
}
command = _make_command(fact.command, host_kwargs)
requires_command = _make_command(fact.requires_command, host_kwargs)
if requires_command:
command = StringCommand(
# Command doesn't exist, return 0 *or* run & return fact command
'!', 'command', '-v', requires_command, '>/dev/null', '||', command,
)
greenlet = state.fact_pool.spawn(
host.run_shell_command,
command,
sudo=sudo,
sudo_user=sudo_user,
use_sudo_password=use_sudo_password,
success_exit_codes=success_exit_codes,
su_user=su_user,
timeout=timeout,
env=env,
shell_executable=shell_executable,
print_output=state.print_fact_output,
print_input=state.print_fact_input,
return_combined_output=True,
)
greenlet_to_host[greenlet] = host
# Wait for all the commands to execute
progress_prefix = 'fact: {0} ({1})'.format(name, get_kwargs_str(kwargs))
with progress_spinner(
greenlet_to_host.values(),
prefix_message=progress_prefix,
) as progress:
for greenlet in gevent.iwait(greenlet_to_host.keys()):
host = greenlet_to_host[greenlet]
progress(host)
hostname_facts = {}
failed_hosts = set()
# Collect the facts and any failures
for greenlet, host in six.iteritems(greenlet_to_host):
status = False
stdout = []
try:
status, combined_output_lines = greenlet.get()
except (timeout_error, socket_error, SSHException) as e:
failed_hosts.add(host)
log_host_command_error(
host, e,
timeout=timeout,
)
stdout, stderr = split_combined_output(combined_output_lines)
data = fact.default()
if status:
if stdout:
data = fact.process(stdout)
elif stderr:
first_line = stderr[0]
if (
sudo_user
and re.match(SUDO_REGEX, first_line)
):
status = True
if (
su_user
and any(re.match(regex, first_line) for regex in SU_REGEXES)
):
status = True
if not status:
failed_hosts.add(host)
if not state.print_fact_output:
print_host_combined_output(host, combined_output_lines)
log_error_or_warning(host, ignore_errors, description=(
'could not load fact: {0} {1}'
).format(name, get_kwargs_str(kwargs)))
hostname_facts[host] = data
log = 'Loaded fact {0}{1}'.format(
click.style(name, bold=True),
' ({0})'.format(get_kwargs_str(kwargs)) if kwargs else '',
)
if state.print_fact_info:
logger.info(log)
else:
logger.debug(log)
# Check we've not failed
if not ignore_errors and apply_failed_hosts:
state.fail_hosts(failed_hosts)
# Assign the facts
state.facts.setdefault(fact_hash, {}).update(hostname_facts)
return state.facts[fact_hash]
|
38,911 | def model_type_schema(
model: Type['BaseModel'],
*,
by_alias: bool,
model_name_map: Dict[Type['BaseModel'], str],
ref_prefix: Optional[str] = None,
known_models: Set[Type['BaseModel']],
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
You probably should be using ``model_schema()``, this function is indirectly used by that function.
Take a single ``model`` and generate the schema for its type only, not including additional
information as title, etc. Also return additional schema definitions, from sub-models.
"""
ref_prefix = ref_prefix or default_prefix
properties = {}
required = []
definitions: Dict[str, Any] = {}
nested_models: Set[str] = set()
for k, f in model.__fields__.items():
try:
f_schema, f_definitions, f_nested_models = field_schema(
f, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix, known_models=known_models
)
except SkipField as skip:
warnings.warn(skip.message, UserWarning)
continue
definitions.update(f_definitions)
nested_models |= f_nested_models
if by_alias:
properties[f.alias] = f_schema
if f.required:
required.append(f.alias)
else:
properties[k] = f_schema
if f.required:
required.append(k)
out_schema = {'type': 'object', 'properties': properties}
if required:
out_schema['required'] = required
return out_schema, definitions, nested_models
| def model_type_schema(
model: Type['BaseModel'],
*,
by_alias: bool,
model_name_map: Dict[Type['BaseModel'], str],
ref_prefix: Optional[str] = None,
known_models: Set[Type['BaseModel']],
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
You probably should be using ``model_schema()``, this function is indirectly used by that function.
Take a single ``model`` and generate the schema for its type only, not including additional
information as title, etc. Also return additional schema definitions, from sub-models.
"""
ref_prefix = ref_prefix or default_prefix
properties = {}
required = []
definitions: Dict[str, Any] = {}
nested_models: Set[str] = set()
for k, f in model.__fields__.items():
try:
f_schema, f_definitions, f_nested_models = field_schema(
f, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix, known_models=known_models
)
except SkipField as skip:
warnings.warn(skip.message, UserWarning)
continue
definitions.update(f_definitions)
nested_models.update(f_nested_models)
if by_alias:
properties[f.alias] = f_schema
if f.required:
required.append(f.alias)
else:
properties[k] = f_schema
if f.required:
required.append(k)
out_schema = {'type': 'object', 'properties': properties}
if required:
out_schema['required'] = required
return out_schema, definitions, nested_models
|
53,324 | def _vspace_iterator(vspace, MAX_ITERATIONS=500, err=1e-10):
r"""
Returns an array of null point object, representing
the null points of the given vector space.
Parameters
----------
vspace: array_like
The vector space as constructed by the vector_space function which is
A 1 by 3 array with the first element containing the coordinates,
the second element containing the vector values,
and the third element containing the delta values for each dimension.
MAX_ITERATIONS: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
The threshold/error that determines if convergence has occured
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
"""
nullpoints = []
for i in range(len(vspace[0][0]) - 1):
for j in range(len(vspace[0][0][0]) - 1):
for k in range(len(vspace[0][0][0][0]) - 1):
if _reduction(vspace, [i, j, k]):
if _trilinear_analysis(vspace, [i, j, k]):
loc = _locate_null_point(vspace, [i, j, k], MAX_ITERATIONS, err)
if loc is not None:
p = NullPoint(loc, "N/A")
if p not in nullpoints:
nullpoints.append(p)
return nullpoints
| def _vspace_iterator(vspace, MAX_ITERATIONS=500, err=1e-10):
r"""
Returns an array of null point object, representing
the null points of the given vector space.
Parameters
----------
vspace: array_like
The vector space as constructed by the vector_space function which is
A 1 by 3 array with the first element containing the coordinates,
the second element containing the vector values,
and the third element containing the delta values for each dimension.
MAX_ITERATIONS: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
The threshold/error that determines if convergence has occured
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of `~plasmapy.analysis.nullpoint.NullPoint` objects
representing the nullpoints of the given vector space.
"""
nullpoints = []
for i in range(len(vspace[0][0]) - 1):
for j in range(len(vspace[0][0][0]) - 1):
for k in range(len(vspace[0][0][0][0]) - 1):
if _reduction(vspace, [i, j, k]):
if _trilinear_analysis(vspace, [i, j, k]):
loc = _locate_null_point(vspace, [i, j, k], MAX_ITERATIONS, err)
if loc is not None:
p = NullPoint(loc, "N/A")
if p not in nullpoints:
nullpoints.append(p)
return nullpoints
|
28,580 | def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color=None,
colors=None,
grid=None,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: :class:`arviz.InferenceData` object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`.
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution.
Defaults to ``True``.
observed: bool, default ``True``
Whether or not to plot the observed data.
color: str
Valid matplotlib ``color``. Defaults to `C0`.
color: list
List with valid matplotlib colors corresponding to the posterior/prior predictive
distribution, observed data and mean of the posterior/prior predictive distribution.
Defaults to ["C0", "k", "C1"].
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None, it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the ``coords`` argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the ``coords`` argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs``
parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then
``flatten_pp`` = `flatten`.
num_pp_samples: int
The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and
`animation` = ``False`` if defaults to a maximum of 5 samples and will set jitter to `0.7`.
unless defined. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to ``numpy.random.seed`` to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by ``num_pp_samples``.
jitter: float
If ``kind`` is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default `0`.
animated: bool
Create an animation of one posterior/prior predictive sample per frame.
Defaults to ``False``. Only works with matploblib backend.
To run animations inside a notebook you have to use the `nbAgg` matplotlib's backend.
Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
default matplotlib's backend with `%matplotlib inline` or `%matplotlib auto`.
If switching back and forth between matplotlib's backend, you may need to run twice the cell
with the animation.
If you experience problems rendering the animation try setting
`animation_kwargs({'blit':False}) or changing the matplotlib's backend (e.g. to TkAgg)
If you run the animation from a script write `ax, ani = az.plot_ppc(.)`
animation_kwargs : dict
Keywords passed to ``animation.FuncAnimation``. Ignored with matplotlib backend.
legend : bool
Add legend to figure. By default ``True``.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to `'posterior'`.
Other value can be `'prior'`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_bvp: Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions
across multiple plots. We will now modify the dimension `obs_id` to contain
indicate the name of the county where the measure was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot')
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if colors is None:
colors = ["C0", "k", "C1"]
if isinstance(colors, str):
raise TypeError("colors should be a list with 3 items.")
if len(colors) != 3:
raise ValueError("colors should be a list with 3 items.")
if color is not None:
warnings.warn("color has been deprecated in favor of colors", FutureWarning)
colors[0] = color
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}."
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
colors=colors,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
labeller=labeller,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
| def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color=None,
colors=None,
grid=None,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: :class:`arviz.InferenceData` object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`.
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution.
Defaults to ``True``.
observed: bool, default ``True``
Whether or not to plot the observed data.
color: str
Valid matplotlib ``color``. Defaults to `C0`.
color: list
List with valid matplotlib colors corresponding to the posterior/prior predictive
distribution, observed data and mean of the posterior/prior predictive distribution.
Defaults to ["C0", "k", "C1"].
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None, it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variable are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the ``coords`` argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the ``coords`` argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs``
parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then
``flatten_pp`` = `flatten`.
num_pp_samples: int
The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and
`animation` = ``False`` if defaults to a maximum of 5 samples and will set jitter to `0.7`.
unless defined. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to ``numpy.random.seed`` to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by ``num_pp_samples``.
jitter: float
If ``kind`` is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default `0`.
animated: bool
Create an animation of one posterior/prior predictive sample per frame.
Defaults to ``False``. Only works with matploblib backend.
To run animations inside a notebook you have to use the `nbAgg` matplotlib's backend.
Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
default matplotlib's backend with `%matplotlib inline` or `%matplotlib auto`.
If switching back and forth between matplotlib's backend, you may need to run twice the cell
with the animation.
If you experience problems rendering the animation try setting
`animation_kwargs({'blit':False}) or changing the matplotlib's backend (e.g. to TkAgg)
If you run the animation from a script write `ax, ani = az.plot_ppc(.)`
animation_kwargs : dict
Keywords passed to ``animation.FuncAnimation``. Ignored with matplotlib backend.
legend : bool
Add legend to figure. By default ``True``.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to `'posterior'`.
Other value can be `'prior'`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_bvp: Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions
across multiple plots. We will now modify the dimension ``obs_id`` to contain
indicate the name of the county where the measure was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot')
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if colors is None:
colors = ["C0", "k", "C1"]
if isinstance(colors, str):
raise TypeError("colors should be a list with 3 items.")
if len(colors) != 3:
raise ValueError("colors should be a list with 3 items.")
if color is not None:
warnings.warn("color has been deprecated in favor of colors", FutureWarning)
colors[0] = color
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}."
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
colors=colors,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
labeller=labeller,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
|
25,575 | def wait_for_address_endpoint(base_url: str, retry_timeout: int) -> str:
"""Keeps retrying the `/address` endpoint."""
while True:
try:
address = get_address(base_url)
log.info(f"{address} finished restarting ready")
return address
except requests.ConnectionError:
log.info(f"Waiting for the server {base_url} to start.")
except requests.RequestException:
log.exception(f"Request to server {base_url} failed.")
gevent.sleep(retry_timeout)
raise RuntimeError("Stopping")
| def wait_for_address_endpoint(base_url: str, retry_timeout: int) -> str:
"""Keeps retrying the `/address` endpoint."""
while True:
try:
address = get_address(base_url)
log.info(f"{address} finished starting and is ready")
return address
except requests.ConnectionError:
log.info(f"Waiting for the server {base_url} to start.")
except requests.RequestException:
log.exception(f"Request to server {base_url} failed.")
gevent.sleep(retry_timeout)
raise RuntimeError("Stopping")
|
51,066 | def forgiving_as_timestamp(value, default=_SENTINEL):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
if default is _SENTINEL:
warn_no_default("as_timestamp", value, value)
return value
return default
| def forgiving_as_timestamp(value, default=_SENTINEL):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
if default is _SENTINEL:
warn_no_default("as_timestamp", value, None)
return None
return default
|
31,735 | def get_no_update_value(response: requests.Response) -> bool:
"""
detect if the feed response has been modified according to the headers etag and last_modified.
For more information, see this:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
Args:
response: (requests.Response) The feed response.
Returns:
boolean with the value for noUpdate argument.
The value should be False if the response was modified.
"""
context = demisto.getIntegrationContext()
old_etag = context.get('etag')
old_last_modified = context.get('last_modified')
etag = response.headers.get('ETag')
last_modified = response.headers.get('Last-Modified')
demisto.setIntegrationContext({'last_modified': last_modified, 'etag': etag})
if old_etag and old_etag != etag:
return False
if old_last_modified and old_last_modified != last_modified:
return False
return True
| def get_no_update_value(response: requests.Response) -> bool:
"""
detect if the feed response has been modified according to the headers etag and last_modified.
For more information, see this:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
Args:
response: (requests.Response) The feed response.
Returns:
boolean with the value for noUpdate argument.
The value should be False if the response was modified.
"""
context = demisto.getIntegrationContext()
old_etag = context.get('etag')
old_last_modified = context.get('last_modified')
etag = response.headers.get('ETag')
last_modified = response.headers.get('Last-Modified')
set_integration_context({'last_modified': last_modified, 'etag': etag})
if old_etag and old_etag != etag:
return False
if old_last_modified and old_last_modified != last_modified:
return False
return True
|
26,346 | def parse_input_line(line: str) -> Tuple[str, str]:
line = line.strip()
return line.split(":", maxsplit=2)
| def parse_input_line(line: str) -> Tuple[str, str]:
line = line.strip()
return line.split(":", maxsplit=1)
|
41,673 | def download_and_extract(
buildpath: Path, packagedir: Path, pkg: Dict[str, Any], args
) -> Path:
srcpath = buildpath / packagedir
if "source" not in pkg:
return srcpath
if "url" in pkg["source"]:
response = request.urlopen(pkg["source"]["url"])
_, parameters = cgi.parse_header(
response.headers.get("Content-Disposition", "")
)
if "filename" in parameters:
tarballname = parameters["filename"]
else:
tarballname = Path(response.geturl()).name
tarballpath = buildpath / tarballname
if not tarballpath.is_file():
try:
os.makedirs(os.path.dirname(tarballpath), exist_ok=True)
with open(tarballpath, "wb") as f:
f.write(response.read())
check_checksum(tarballpath, pkg)
except Exception:
tarballpath.unlink()
raise
if not srcpath.is_dir():
shutil.unpack_archive(str(tarballpath), str(buildpath))
for extension in [
".tar.gz",
".tgz",
".tar",
".tar.bz2",
".tbz2",
".tar.xz",
".txz",
".zip",
]:
if tarballname.endswith(extension):
tarballname = tarballname[: -len(extension)]
break
if "extract_dir" in pkg["source"]:
return buildpath / pkg["source"]["extract_dir"]
else:
return buildpath / tarballname
elif "path" in pkg["source"]:
srcdir = Path(pkg["source"]["path"])
if not srcdir.is_dir():
raise ValueError("'path' must point to a path")
if not srcpath.is_dir():
shutil.copytree(srcdir, srcpath)
return srcpath
else:
raise ValueError("Incorrect source provided")
| def download_and_extract(
buildpath: Path, packagedir: Path, pkg: Dict[str, Any], args
) -> Path:
srcpath = buildpath / packagedir
if "source" not in pkg:
return srcpath
if "url" in pkg["source"]:
response = request.urlopen(pkg["source"]["url"])
_, parameters = cgi.parse_header(
response.headers.get("Content-Disposition", "")
)
if "filename" in parameters:
tarballname = parameters["filename"]
else:
tarballname = Path(response.geturl()).name
tarballpath = buildpath / tarballname
if not tarballpath.is_file():
try:
os.makedirs(os.path.dirname(tarballpath), exist_ok=True)
with open(tarballpath, "wb") as f:
f.write(response.read())
check_checksum(tarballpath, pkg)
except Exception:
tarballpath.unlink()
raise
if not srcpath.is_dir():
shutil.unpack_archive(str(tarballpath), str(buildpath))
for extension in [
".tar.gz",
".tgz",
".tar",
".tar.bz2",
".tbz2",
".tar.xz",
".txz",
".zip",
]:
if tarballname.endswith(extension):
tarballname = tarballname[: -len(extension)]
break
return buildpath / pkg.get("source", {}).get("extract_dir", tarballname)
elif "path" in pkg["source"]:
srcdir = Path(pkg["source"]["path"])
if not srcdir.is_dir():
raise ValueError("'path' must point to a path")
if not srcpath.is_dir():
shutil.copytree(srcdir, srcpath)
return srcpath
else:
raise ValueError("Incorrect source provided")
|
53,935 | def _sanitize_label(value: str) -> str:
"""Return a legal value for a BigQuery label."""
value = value.strip().lower()
value = _SANITIZE_LABEL_PATTERN.sub("_", value)
value_length = len(value)
if value_length > _VALIDATE_LABEL_LENGTH_LIMIT:
error_msg = (
f"Current label length {value_length} is greater than length limit: {_VALIDATE_LABEL_LENGTH_LIMIT} | Current sanitized label: {value}"
)
raise Exception(error_msg)
else:
return value
| def _sanitize_label(value: str) -> str:
"""Return a legal value for a BigQuery label."""
value = value.strip().lower()
value = _SANITIZE_LABEL_PATTERN.sub("_", value)
value_length = len(value)
if value_length > _VALIDATE_LABEL_LENGTH_LIMIT:
error_msg = (
f"Job label length {value_length} is greater than length limit: " \
f"{_VALIDATE_LABEL_LENGTH_LIMIT}\n" \
f"Current sanitized label: {value}"
)
raise Exception(error_msg)
else:
return value
|
5,326 | def _call_apt(args, scope=True, **kwargs):
'''
Call apt* utilities.
'''
cmd = []
if scope and salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope', '--description "{0}"'.format(__name__)])
cmd.extend(args)
params = {'output_loglevel': 'trace',
'python_shell': False,
'env': salt.utils.environment.get_module_environment(globals())}
params.update(kwargs)
return __salt__['cmd.run_all'](cmd, **params)
| def _call_apt(args, scope=True, **kwargs):
'''
Call apt* utilities.
'''
cmd = []
if scope and salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope', '--description', '"{0}"'.format(__name__)])
cmd.extend(args)
params = {'output_loglevel': 'trace',
'python_shell': False,
'env': salt.utils.environment.get_module_environment(globals())}
params.update(kwargs)
return __salt__['cmd.run_all'](cmd, **params)
|
55,544 | def concatenate(dfs):
"""
Concatenate pandas DataFrames with saving 'category' dtype.
Parameters
----------
dfs : list
List of pandas DataFrames to concatenate.
Returns
-------
pandas.DataFrame
A pandas DataFrame.
"""
categoricals_column_names = set.intersection(
*[set(df.select_dtypes("category").columns.tolist()) for df in dfs]
)
for column_name in categoricals_column_names:
# Build a list of all columns in all dfs with name column_name.
categorical_columns_with_name = []
for df in dfs:
categorical_columns_in_df = df[column_name]
# Fast path for when the column name is unique.
if type(categorical_columns_in_df) == pandas.Series:
categorical_columns_with_name.append(categorical_columns_in_df)
else:
# If the column name is repeated, df[column_name] gives a
# a dataframe with all matching columns instead of a series.
categorical_columns_with_name.extend(
categorical_columns_in_df.iloc[:, i]
for i in range(len(categorical_columns_in_df.columns))
)
# Make a new category unioning all columns with the current name.
categories = union_categoricals(categorical_columns_with_name).categories
# Replace all columns having the current name with the new category.
for df in dfs:
categorical_columns_in_df = df[column_name]
# Fast path for when the column name is unique.
if type(categorical_columns_in_df) == pandas.Series:
df[column_name] = pandas.Categorical(
df[column_name], categories=categories
)
else:
for i in range(len(categorical_columns_in_df.columns)):
df.iloc[:, i] = pandas.Categorical(
df.iloc[:, i], categories=categories
)
return pandas.concat(dfs)
| def concatenate(dfs):
"""
Concatenate pandas DataFrames with saving 'category' dtype.
Parameters
----------
dfs : list
List of pandas DataFrames to concatenate.
Returns
-------
pandas.DataFrame
A pandas DataFrame.
"""
categoricals_column_names = set.intersection(
*[set(df.select_dtypes("category").columns.tolist()) for df in dfs]
)
for column_name in categoricals_column_names:
# Build a list of all columns in all dfs with name column_name.
categorical_columns_with_name = []
for df in dfs:
categorical_columns_in_df = df[column_name]
# Fast path for when the column name is unique.
if type(categorical_columns_in_df) == pandas.Series:
categorical_columns_with_name.append(categorical_columns_in_df)
else:
# If the column name is repeated, df[column_name] gives a
# a dataframe with all matching columns instead of a series.
categorical_columns_with_name.extend(
col for _, col in categorical_columns_in_df.iteritems()
)
# Make a new category unioning all columns with the current name.
categories = union_categoricals(categorical_columns_with_name).categories
# Replace all columns having the current name with the new category.
for df in dfs:
categorical_columns_in_df = df[column_name]
# Fast path for when the column name is unique.
if type(categorical_columns_in_df) == pandas.Series:
df[column_name] = pandas.Categorical(
df[column_name], categories=categories
)
else:
for i in range(len(categorical_columns_in_df.columns)):
df.iloc[:, i] = pandas.Categorical(
df.iloc[:, i], categories=categories
)
return pandas.concat(dfs)
|
42,922 | def c_0(clique: list, graph: nx.Graph):
"""Generates the set :math:`C_0` of nodes that are connected to all nodes in the input
clique subgraph.
The set :math:`C_0` is defined in :cite:`pullan2006phased` and is used to determine nodes
that can be added to the current clique to grow it into a larger one.
Example usage:
.. code-block::
>>> from strawberryfields.apps.graph import utils
>>> import networkx as nx
>>> graph = nx.complete_graph(10)
>>> subgraph = [0, 1, 2, 3, 4]
>>> utils.c_0(subgraph, graph)
[5, 6, 7, 8, 9]
Args:
clique (list[int]): A subgraph specified by a list of nodes; the subgraph must be a clique.
graph (nx.Graph): The input graph.
Returns:
list[int]: A list containing the :math:`C_0` nodes for the clique.
"""
if not is_clique(graph.subgraph(clique)):
raise ValueError("Input subgraph is not a clique")
clique = set(clique)
c_0_nodes = []
non_clique_nodes = set(graph.nodes) - clique
for i in non_clique_nodes:
if clique.issubset(graph.neighbors(i)):
c_0_nodes.append(i)
return c_0_nodes
| def c_0(clique: list, graph: nx.Graph):
"""Generates the set :math:`C_0` of nodes that are connected to all nodes in the input
clique subgraph.
The set :math:`C_0` is defined in :cite:`pullan2006phased` and is used to determine nodes
that can be added to the current clique to grow it into a larger one.
Example usage:
.. code-block::
>>> from strawberryfields.apps.graph import utils
>>> import networkx as nx
>>> graph = nx.complete_graph(10)
>>> subgraph = [0, 1, 2, 3, 4]
>>> utils.c_0(subgraph, graph)
[5, 6, 7, 8, 9]
Args:
clique (list[int]): A subgraph specified by a list of nodes; the subgraph must be a clique.
graph (nx.Graph): The input graph.
Returns:
list[int]: a list containing the :math:`C_0` nodes for the clique
"""
if not is_clique(graph.subgraph(clique)):
raise ValueError("Input subgraph is not a clique")
clique = set(clique)
c_0_nodes = []
non_clique_nodes = set(graph.nodes) - clique
for i in non_clique_nodes:
if clique.issubset(graph.neighbors(i)):
c_0_nodes.append(i)
return c_0_nodes
|
47,399 | def pipeline(
task: str = None,
model: Optional = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None,
framework: Optional[str] = None,
revision: Optional[str] = None,
use_fast: bool = True,
use_auth_token: Optional[Union[str, bool]] = None,
model_kwargs: Dict[str, Any] = None,
pipeline_class: Optional[Any] = None,
**kwargs
) -> Pipeline:
"""
Utility factory method to build a [`Pipeline`].
Pipelines are made of:
- A [tokenizer](tokenizer) in charge of mapping raw textual input to token.
- A [model](model) to make predictions from the inputs.
- Some (optional) post processing for enhancing model's output.
Args:
task (`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- `"audio-classification"`: will return a [`AudioClassificationPipeline`].
- `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`].
- `"conversational"`: will return a [`ConversationalPipeline`].
- `"feature-extraction"`: will return a [`FeatureExtractionPipeline`].
- `"fill-mask"`: will return a [`FillMaskPipeline`]:.
- `"image-classification"`: will return a [`ImageClassificationPipeline`].
- `"question-answering"`: will return a [`QuestionAnsweringPipeline`].
- `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`].
- `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`].
- `"text-classification"` (alias `"sentiment-analysis"` available): will return a
[`TextClassificationPipeline`].
- `"text-generation"`: will return a [`TextGenerationPipeline`]:.
- `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`].
- `"translation"`: will return a [`TranslationPipeline`].
- `"translation_xx_to_yy"`: will return a [`TranslationPipeline`].
- `"summarization"`: will return a [`SummarizationPipeline`].
- `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`].
model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*):
The model that will be used by the pipeline to make predictions. This can be a model identifier or an
actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or
[`TFPreTrainedModel`] (for TensorFlow).
If not provided, the default for the `task` will be loaded.
config (`str` or [`PretrainedConfig`], *optional*):
The configuration that will be used by the pipeline to instantiate the model. This can be a model
identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`].
If not provided, the default configuration file for the requested model will be used. That means that if
`model` is given, its default configuration will be used. However, if `model` is not supplied, this
`task`'s default model's config is used instead.
tokenizer (`str` or [`PreTrainedTokenizer`], *optional*):
The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`].
If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model`
is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string).
However, if `config` is also not given or not a string, then the default tokenizer for the given `task`
will be loaded.
feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*):
The feature extractor that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`].
Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal
models. Multi-modal models will also require a tokenizer to be passed.
If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If
`model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it
is a string). However, if `config` is also not given or not a string, then the default feature extractor
for the given `task` will be loaded.
framework (`str`, *optional*):
The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
provided.
revision(`str`, *optional*, defaults to `"main"`):
When passing a task name or a string model identifier: The specific model version to use. It can be a
branch name, a tag name, or a commit id, since we use a git-based system for storing models and other
artifacts on huggingface.co, so `revision` can be any identifier allowed by git.
use_fast (`bool`, *optional*, defaults to `True`):
Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]).
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`). revision(`str`, *optional*, defaults to
`"main"`):
model_kwargs:
Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
**model_kwargs)` function.
kwargs:
Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
corresponding pipeline class for possible values).
Returns:
[`Pipeline`]: A suitable pipeline for the task.
Examples:
```python
>>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
>>> # Sentiment analysis pipeline
>>> pipeline("sentiment-analysis")
>>> # Question answering pipeline, specifying the checkpoint identifier
>>> pipeline("question-answering", model="distilbert-base-cased-distilled-squad", tokenizer="bert-base-cased")
>>> # Named entity recognition pipeline, passing in a specific model and tokenizer
>>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> pipeline("ner", model=model, tokenizer=tokenizer)
```"""
if model_kwargs is None:
model_kwargs = {}
if task is None and model is None:
raise RuntimeError(
"Impossible to instantiate a pipeline without either a task or a model"
"being specified."
"Please provide a task class or a model"
)
if model is None and tokenizer is not None:
raise RuntimeError(
"Impossible to instantiate a pipeline with tokenizer specified but not the model "
"as the provided tokenizer may not be compatible with the default model. "
"Please provide a PreTrainedModel class or a path/identifier to a pretrained model when providing tokenizer."
)
if model is None and feature_extractor is not None:
raise RuntimeError(
"Impossible to instantiate a pipeline with feature_extractor specified but not the model "
"as the provided feature_extractor may not be compatible with the default model. "
"Please provide a PreTrainedModel class or a path/identifier to a pretrained model when providing feature_extractor."
)
if task is None and model is not None:
if not isinstance(model, str):
raise RuntimeError(
"Inferring the task automatically requires to check the hub with a model_id defined as a `str`."
f"{model} is not a valid model_id."
)
task = get_task(model, use_auth_token)
# Retrieve the task
targeted_task, task_options = check_task(task)
if pipeline_class is None:
pipeline_class = targeted_task["impl"]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
# At that point framework might still be undetermined
model = get_default_model(targeted_task, framework, task_options)
logger.warning(f"No model was supplied, defaulted to {model} (https://huggingface.co/{model})")
# Retrieve use_auth_token and add it to model_kwargs to be used in .from_pretrained
model_kwargs["use_auth_token"] = model_kwargs.get("use_auth_token", use_auth_token)
# Config is the primordial information item.
# Instantiate config if needed
if isinstance(config, str):
config = AutoConfig.from_pretrained(config, revision=revision, _from_pipeline=task, **model_kwargs)
elif config is None and isinstance(model, str):
config = AutoConfig.from_pretrained(model, revision=revision, _from_pipeline=task, **model_kwargs)
model_name = model if isinstance(model, str) else None
# Infer the framework from the model
# Forced if framework already defined, inferred if it's None
# Will load the correct model if possible
model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]}
framework, model = infer_framework_load_model(
model,
model_classes=model_classes,
config=config,
framework=framework,
revision=revision,
task=task,
**model_kwargs,
)
model_config = model.config
load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None
load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None
if task in NO_TOKENIZER_TASKS:
# These will never require a tokenizer.
# the model on the other hand might have a tokenizer, but
# the files could be missing from the hub, instead of failing
# on such repos, we just force to not load it.
load_tokenizer = False
if task in NO_FEATURE_EXTRACTOR_TASKS:
load_feature_extractor = False
if load_tokenizer:
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model_name, str):
tokenizer = model_name
elif isinstance(config, str):
tokenizer = config
else:
# Impossible to guess what is the right tokenizer here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
use_fast = tokenizer[1].pop("use_fast", use_fast)
tokenizer_identifier = tokenizer[0]
tokenizer_kwargs = tokenizer[1]
else:
tokenizer_identifier = tokenizer
tokenizer_kwargs = model_kwargs
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_identifier, revision=revision, use_fast=use_fast, _from_pipeline=task, **tokenizer_kwargs
)
if load_feature_extractor:
# Try to infer feature extractor from model or config name (if provided as str)
if feature_extractor is None:
if isinstance(model_name, str):
feature_extractor = model_name
elif isinstance(config, str):
feature_extractor = config
else:
# Impossible to guess what is the right feature_extractor here
raise Exception(
"Impossible to guess which feature extractor to use. "
"Please provide a PreTrainedFeatureExtractor class or a path/identifier "
"to a pretrained feature extractor."
)
# Instantiate feature_extractor if needed
if isinstance(feature_extractor, (str, tuple)):
feature_extractor = AutoFeatureExtractor.from_pretrained(
feature_extractor, revision=revision, _from_pipeline=task, **model_kwargs
)
if (
feature_extractor._processor_class
and feature_extractor._processor_class.endswith("WithLM")
and isinstance(model_name, str)
):
try:
import kenlm # to trigger `ImportError` if not installed
from pyctcdecode import BeamSearchDecoderCTC
if os.path.isdir(model_name):
decoder = BeamSearchDecoderCTC.load_from_dir(model_name)
else:
language_model_glob = os.path.join(BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*")
alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
allow_regex = [language_model_glob, alphabet_filename]
decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_regex=allow_regex)
kwargs["decoder"] = decoder
except ImportError as e:
logger.warning(
"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Try to install `pyctcdecode` and `kenlm`: (`pip install pyctcdecode`, `pip install https://github.com/kpu/kenlm/archive/master.zip`): Error: {e}"
)
if task == "translation" and model.config.task_specific_params:
for key in model.config.task_specific_params:
if key.startswith("translation"):
task = key
warnings.warn(
f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"',
UserWarning,
)
break
if tokenizer is not None:
kwargs["tokenizer"] = tokenizer
if feature_extractor is not None:
kwargs["feature_extractor"] = feature_extractor
return pipeline_class(model=model, framework=framework, task=task, **kwargs)
| def pipeline(
task: str = None,
model: Optional = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None,
framework: Optional[str] = None,
revision: Optional[str] = None,
use_fast: bool = True,
use_auth_token: Optional[Union[str, bool]] = None,
model_kwargs: Dict[str, Any] = None,
pipeline_class: Optional[Any] = None,
**kwargs
) -> Pipeline:
"""
Utility factory method to build a [`Pipeline`].
Pipelines are made of:
- A [tokenizer](tokenizer) in charge of mapping raw textual input to token.
- A [model](model) to make predictions from the inputs.
- Some (optional) post processing for enhancing model's output.
Args:
task (`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- `"audio-classification"`: will return a [`AudioClassificationPipeline`].
- `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`].
- `"conversational"`: will return a [`ConversationalPipeline`].
- `"feature-extraction"`: will return a [`FeatureExtractionPipeline`].
- `"fill-mask"`: will return a [`FillMaskPipeline`]:.
- `"image-classification"`: will return a [`ImageClassificationPipeline`].
- `"question-answering"`: will return a [`QuestionAnsweringPipeline`].
- `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`].
- `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`].
- `"text-classification"` (alias `"sentiment-analysis"` available): will return a
[`TextClassificationPipeline`].
- `"text-generation"`: will return a [`TextGenerationPipeline`]:.
- `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`].
- `"translation"`: will return a [`TranslationPipeline`].
- `"translation_xx_to_yy"`: will return a [`TranslationPipeline`].
- `"summarization"`: will return a [`SummarizationPipeline`].
- `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`].
model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*):
The model that will be used by the pipeline to make predictions. This can be a model identifier or an
actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or
[`TFPreTrainedModel`] (for TensorFlow).
If not provided, the default for the `task` will be loaded.
config (`str` or [`PretrainedConfig`], *optional*):
The configuration that will be used by the pipeline to instantiate the model. This can be a model
identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`].
If not provided, the default configuration file for the requested model will be used. That means that if
`model` is given, its default configuration will be used. However, if `model` is not supplied, this
`task`'s default model's config is used instead.
tokenizer (`str` or [`PreTrainedTokenizer`], *optional*):
The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`].
If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model`
is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string).
However, if `config` is also not given or not a string, then the default tokenizer for the given `task`
will be loaded.
feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*):
The feature extractor that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`].
Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal
models. Multi-modal models will also require a tokenizer to be passed.
If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If
`model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it
is a string). However, if `config` is also not given or not a string, then the default feature extractor
for the given `task` will be loaded.
framework (`str`, *optional*):
The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
provided.
revision(`str`, *optional*, defaults to `"main"`):
When passing a task name or a string model identifier: The specific model version to use. It can be a
branch name, a tag name, or a commit id, since we use a git-based system for storing models and other
artifacts on huggingface.co, so `revision` can be any identifier allowed by git.
use_fast (`bool`, *optional*, defaults to `True`):
Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]).
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`). revision(`str`, *optional*, defaults to
`"main"`):
model_kwargs:
Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
**model_kwargs)` function.
kwargs:
Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
corresponding pipeline class for possible values).
Returns:
[`Pipeline`]: A suitable pipeline for the task.
Examples:
```python
>>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
>>> # Sentiment analysis pipeline
>>> pipeline("sentiment-analysis")
>>> # Question answering pipeline, specifying the checkpoint identifier
>>> pipeline("question-answering", model="distilbert-base-cased-distilled-squad", tokenizer="bert-base-cased")
>>> # Named entity recognition pipeline, passing in a specific model and tokenizer
>>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> pipeline("ner", model=model, tokenizer=tokenizer)
```"""
if model_kwargs is None:
model_kwargs = {}
if task is None and model is None:
raise RuntimeError(
"Impossible to instantiate a pipeline without either a task or a model"
"being specified."
"Please provide a task class or a model"
)
if model is None and tokenizer is not None:
raise RuntimeError(
"Impossible to instantiate a pipeline with tokenizer specified but not the model "
"as the provided tokenizer may not be compatible with the default model. "
"Please provide a PreTrainedModel class or a path/identifier to a pretrained model when providing tokenizer."
)
if model is None and feature_extractor is not None:
raise RuntimeError(
"Impossible to instantiate a pipeline with feature_extractor specified but not the model "
"as the provided feature_extractor may not be compatible with the default model. "
"Please provide a PreTrainedModel class or a path/identifier to a pretrained model when providing feature_extractor."
)
if task is None and model is not None:
if not isinstance(model, str):
raise RuntimeError(
"Inferring the task automatically requires to check the hub with a model_id defined as a `str`."
f"{model} is not a valid model_id."
)
task = get_task(model, use_auth_token)
# Retrieve the task
targeted_task, task_options = check_task(task)
if pipeline_class is None:
pipeline_class = targeted_task["impl"]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
# At that point framework might still be undetermined
model = get_default_model(targeted_task, framework, task_options)
logger.warning(f"No model was supplied, defaulted to {model} (https://huggingface.co/{model})")
# Retrieve use_auth_token and add it to model_kwargs to be used in .from_pretrained
model_kwargs["use_auth_token"] = model_kwargs.get("use_auth_token", use_auth_token)
# Config is the primordial information item.
# Instantiate config if needed
if isinstance(config, str):
config = AutoConfig.from_pretrained(config, revision=revision, _from_pipeline=task, **model_kwargs)
elif config is None and isinstance(model, str):
config = AutoConfig.from_pretrained(model, revision=revision, _from_pipeline=task, **model_kwargs)
model_name = model if isinstance(model, str) else None
# Infer the framework from the model
# Forced if framework already defined, inferred if it's None
# Will load the correct model if possible
model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]}
framework, model = infer_framework_load_model(
model,
model_classes=model_classes,
config=config,
framework=framework,
revision=revision,
task=task,
**model_kwargs,
)
model_config = model.config
load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None
load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None
if task in NO_TOKENIZER_TASKS:
# These will never require a tokenizer.
# the model on the other hand might have a tokenizer, but
# the files could be missing from the hub, instead of failing
# on such repos, we just force to not load it.
load_tokenizer = False
if task in NO_FEATURE_EXTRACTOR_TASKS:
load_feature_extractor = False
if load_tokenizer:
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model_name, str):
tokenizer = model_name
elif isinstance(config, str):
tokenizer = config
else:
# Impossible to guess what is the right tokenizer here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
use_fast = tokenizer[1].pop("use_fast", use_fast)
tokenizer_identifier = tokenizer[0]
tokenizer_kwargs = tokenizer[1]
else:
tokenizer_identifier = tokenizer
tokenizer_kwargs = model_kwargs
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_identifier, revision=revision, use_fast=use_fast, _from_pipeline=task, **tokenizer_kwargs
)
if load_feature_extractor:
# Try to infer feature extractor from model or config name (if provided as str)
if feature_extractor is None:
if isinstance(model_name, str):
feature_extractor = model_name
elif isinstance(config, str):
feature_extractor = config
else:
# Impossible to guess what is the right feature_extractor here
raise Exception(
"Impossible to guess which feature extractor to use. "
"Please provide a PreTrainedFeatureExtractor class or a path/identifier "
"to a pretrained feature extractor."
)
# Instantiate feature_extractor if needed
if isinstance(feature_extractor, (str, tuple)):
feature_extractor = AutoFeatureExtractor.from_pretrained(
feature_extractor, revision=revision, _from_pipeline=task, **model_kwargs
)
if (
feature_extractor._processor_class
and feature_extractor._processor_class.endswith("WithLM")
and isinstance(model_name, str)
):
try:
import kenlm # to trigger `ImportError` if not installed
from pyctcdecode import BeamSearchDecoderCTC
if os.path.isdir(model_name) or os.path.isfile(model_name):
decoder = BeamSearchDecoderCTC.load_from_dir(model_name)
else:
language_model_glob = os.path.join(BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*")
alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
allow_regex = [language_model_glob, alphabet_filename]
decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_regex=allow_regex)
kwargs["decoder"] = decoder
except ImportError as e:
logger.warning(
"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Try to install `pyctcdecode` and `kenlm`: (`pip install pyctcdecode`, `pip install https://github.com/kpu/kenlm/archive/master.zip`): Error: {e}"
)
if task == "translation" and model.config.task_specific_params:
for key in model.config.task_specific_params:
if key.startswith("translation"):
task = key
warnings.warn(
f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"',
UserWarning,
)
break
if tokenizer is not None:
kwargs["tokenizer"] = tokenizer
if feature_extractor is not None:
kwargs["feature_extractor"] = feature_extractor
return pipeline_class(model=model, framework=framework, task=task, **kwargs)
|
32,043 | def get_list_by_id(args: dict, sg):
listID = args.get('list_id')
params = {}
contactSample = args.get('contact_sample')
if contactSample:
params['contact_sample'] = False if contactSample == 'False' else True
response = sg.client.marketing.lists._(listID).get(query_params=params)
if response.status_code == 200:
rBody = response.body
body = json.loads(rBody.decode("utf-8"))
ec = {'Sendgrid.List': body}
md = tableToMarkdown('List details ', body)
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': body,
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'Retrieving a list is failed: ' + str(response.body)
| def get_list_by_id(args: dict, sg):
listID = args.get('list_id')
params = {}
contactSample = args.get('contact_sample')
if contactSample:
params['contact_sample'] = False if contactSample == 'False' else True
response = sg.client.marketing.lists._(listID).get(query_params=params)
if response.status_code == 200:
rBody = response.body
body = json.loads(rBody.decode("utf-8"))
ec = {'Sendgrid.List': body}
md = tableToMarkdown('List details ', body)
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': body,
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'Failed to retrieve list information: ' + str(response.body)
|
20,022 | def find_branch_pts(skel_img, mask=None):
"""
The branching algorithm was inspired by Jean-Patrick Pommier: https://gist.github.com/jeanpat/5712699
Inputs:
skel_img = Skeletonized image
mask = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.
Returns:
branch_pts_img = Image with just branch points, rest 0
:param skel_img: numpy.ndarray
:param mask: np.ndarray
:return branch_pts_img: numpy.ndarray
"""
# In a kernel: 1 values line up with 255s, -1s line up with 0s, and 0s correspond to don't care
# T like branch points
t1 = np.array([[-1, 1, -1],
[1, 1, 1],
[-1, -1, -1]])
t2 = np.array([[1, -1, 1],
[-1, 1, -1],
[1, -1, -1]])
t3 = np.rot90(t1)
t4 = np.rot90(t2)
t5 = np.rot90(t3)
t6 = np.rot90(t4)
t7 = np.rot90(t5)
t8 = np.rot90(t6)
# Y like branch points
y1 = np.array([[1, -1, 1],
[0, 1, 0],
[0, 1, 0]])
y2 = np.array([[-1, 1, -1],
[1, 1, 0],
[-1, 0, 1]])
y3 = np.rot90(y1)
y4 = np.rot90(y2)
y5 = np.rot90(y3)
y6 = np.rot90(y4)
y7 = np.rot90(y5)
y8 = np.rot90(y6)
kernels = [t1, t2, t3, t4, t5, t6, t7, t8, y1, y2, y3, y4, y5, y6, y7, y8]
branch_pts_img = np.zeros(skel_img.shape[:2], dtype=int)
# Store branch points
for kernel in kernels:
branch_pts_img = np.logical_or(cv2.morphologyEx(skel_img, op=cv2.MORPH_HITMISS, kernel=kernel,
borderType=cv2.BORDER_CONSTANT, borderValue=0), branch_pts_img)
# Switch type to uint8 rather than bool
branch_pts_img = branch_pts_img.astype(np.uint8) * 255
# Store debug
debug = params.debug
params.debug = None
# Make debugging image
if mask is None:
dilated_skel = dilate(skel_img, params.line_thickness, 1)
branch_plot = cv2.cvtColor(dilated_skel, cv2.COLOR_GRAY2RGB)
else:
# Make debugging image on mask
mask_copy = mask.copy()
branch_plot = cv2.cvtColor(mask_copy, cv2.COLOR_GRAY2RGB)
skel_obj, skel_hier = find_objects(skel_img, skel_img)
cv2.drawContours(branch_plot, skel_obj, -1, (150, 150, 150), params.line_thickness, lineType=8,
hierarchy=skel_hier)
branch_objects, _ = find_objects(branch_pts_img, branch_pts_img)
# Initialize list of tip data points
branch_list = []
branch_labels = []
for i, branch in enumerate(branch_objects):
x, y = branch.ravel()[:2]
branch_list.append((float(x), float(y)))
branch_labels.append(i)
cv2.circle(branch_plot, (x, y), params.line_thickness, (255, 0, 255), -1)
outputs.add_observation(variable='branch_pts', trait='list of tip coordinates identified from a skeleton',
method='plantcv.plantcv.morphology.find_branch_pts', scale='pixels', datatype=list,
value=branch_list, label=branch_labels)
# Reset debug mode
params.debug = debug
# Auto-increment device
params.device += 1
if params.debug == 'print':
print_image(branch_plot, os.path.join(params.debug_outdir, str(params.device) + '_branch_pts.png'))
elif params.debug == 'plot':
plot_image(branch_plot)
return branch_pts_img
| def find_branch_pts(skel_img, mask=None):
"""
The branching algorithm was inspired by Jean-Patrick Pommier: https://gist.github.com/jeanpat/5712699
Inputs:
skel_img = Skeletonized image
mask = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.
Returns:
branch_pts_img = Image with just branch points, rest 0
:param skel_img: numpy.ndarray
:param mask: np.ndarray
:return branch_pts_img: numpy.ndarray
"""
# In a kernel: 1 values line up with 255s, -1s line up with 0s, and 0s correspond to don't care
# T like branch points
t1 = np.array([[-1, 1, -1],
[1, 1, 1],
[-1, -1, -1]])
t2 = np.array([[1, -1, 1],
[-1, 1, -1],
[1, -1, -1]])
t3 = np.rot90(t1)
t4 = np.rot90(t2)
t5 = np.rot90(t3)
t6 = np.rot90(t4)
t7 = np.rot90(t5)
t8 = np.rot90(t6)
# Y like branch points
y1 = np.array([[1, -1, 1],
[0, 1, 0],
[0, 1, 0]])
y2 = np.array([[-1, 1, -1],
[1, 1, 0],
[-1, 0, 1]])
y3 = np.rot90(y1)
y4 = np.rot90(y2)
y5 = np.rot90(y3)
y6 = np.rot90(y4)
y7 = np.rot90(y5)
y8 = np.rot90(y6)
kernels = [t1, t2, t3, t4, t5, t6, t7, t8, y1, y2, y3, y4, y5, y6, y7, y8]
branch_pts_img = np.zeros(skel_img.shape[:2], dtype=int)
# Store branch points
for kernel in kernels:
branch_pts_img = np.logical_or(cv2.morphologyEx(skel_img, op=cv2.MORPH_HITMISS, kernel=kernel,
borderType=cv2.BORDER_CONSTANT, borderValue=0), branch_pts_img)
# Switch type to uint8 rather than bool
branch_pts_img = branch_pts_img.astype(np.uint8) * 255
# Store debug
debug = params.debug
params.debug = None
# Make debugging image
if mask is None:
dilated_skel = dilate(skel_img, params.line_thickness, 1)
branch_plot = cv2.cvtColor(dilated_skel, cv2.COLOR_GRAY2RGB)
else:
# Make debugging image on mask
mask_copy = mask.copy()
branch_plot = cv2.cvtColor(mask_copy, cv2.COLOR_GRAY2RGB)
skel_obj, skel_hier = find_objects(skel_img, skel_img)
cv2.drawContours(branch_plot, skel_obj, -1, (150, 150, 150), params.line_thickness, lineType=8,
hierarchy=skel_hier)
branch_objects, _ = find_objects(branch_pts_img, branch_pts_img)
# Initialize list of tip data points
branch_list = []
branch_labels = []
for i, branch in enumerate(branch_objects):
x, y = branch.ravel()[:2]
branch_list.append((float(x), float(y)))
branch_labels.append(i)
cv2.circle(branch_plot, (x, y), params.line_thickness, (255, 0, 255), -1)
outputs.add_observation(variable='branch_pts', trait='list of branch-point coordinates identified from a skeleton',
method='plantcv.plantcv.morphology.find_branch_pts', scale='pixels', datatype=list,
value=branch_list, label=branch_labels)
# Reset debug mode
params.debug = debug
# Auto-increment device
params.device += 1
if params.debug == 'print':
print_image(branch_plot, os.path.join(params.debug_outdir, str(params.device) + '_branch_pts.png'))
elif params.debug == 'plot':
plot_image(branch_plot)
return branch_pts_img
|
50,105 | def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
prog=NAME,
description='Run cloud-init in daemon mode')
return parser
| def get_parser(parser):
return parser
if not parser:
parser = argparse.ArgumentParser(
prog=NAME,
description='Run cloud-init in daemon mode')
return parser
|
12,967 | def test_page_publication_date_sets_is_publish_staff_user(
staff_api_client, api_client, permission_manage_pages, page
):
publication_date = date(year=2020, month=3, day=18)
with freeze_time(publication_date):
page.publication_date = date.today()
page.save(update_fields=["publication_date"])
variables = {"id": graphene.Node.to_global_id("Page", page.pk)}
staff_api_client.user.user_permissions.add(permission_manage_pages)
with freeze_time(publication_date.replace(day=publication_date.day - 1)):
response = staff_api_client.post_graphql(QUERY_PAGE_IS_PUBLISHED, variables)
content = get_graphql_content(response, ignore_errors=True)
data = content["data"]["page"]
is_published = data["isPublished"]
assert is_published is False
| def test_page_publication_date_sets_is_publish_staff_user(
staff_api_client, api_client, permission_manage_pages, page
):
publication_date = date(year=2020, month=3, day=18)
with freeze_time(publication_date):
page.publication_date = date.today()
page.save(update_fields=["publication_date"])
variables = {"id": graphene.Node.to_global_id("Page", page.pk)}
staff_api_client.user.user_permissions.add(permission_manage_pages)
with freeze_time(publication_date.replace(day=publication_date.day - 1)):
response = staff_api_client.post_graphql(QUERY_PAGE_IS_PUBLISHED, variables)
content = get_graphql_content(response, ignore_errors=True)
data = content["data"]["page"]
assert data["isPublished"] is False
|
26,457 | def get_new_command(command):
# because composer lets you install many packages at once, must look at output to determine the erroneous package name
wrong_package_name = re.search(r"Could not find package (.*)\.", command.output).group(1)
offending_script_param = wrong_package_name if (wrong_package_name in command.script_parts) else re.findall(
r"{}:[^ ]+".format(wrong_package_name), command.script)[0]
version_constraint = offending_script_param[len(wrong_package_name):]
one_suggestion_only = 'did you mean this?' in command.output.lower()
if one_suggestion_only:
# wrong regex??
new_cmd = re.findall(r'Did you mean this\?[^\n]*\n\s*([^\n]*)', command.output)
return replace_argument(command.script, offending_script_param, new_cmd[0].strip() + version_constraint)
else:
# there are multiple suggestions
# trim output text to make it more digestable by regex
trim_start_index = command.output.find("Did you mean one of these?")
short_output = command.output[trim_start_index:]
stripped_lines = [line.strip() for line in short_output.split("\n")]
# each of the suggested commands can be found from index 1 to the first occurence of blank string
try:
end_index = stripped_lines.index('')
except ValueError:
end_index = None
suggested_commands = stripped_lines[1:end_index]
return [
replace_argument(command.script, offending_script_param, cmd + version_constraint)
for cmd in suggested_commands
]
| def get_new_command(command):
# because composer lets you install many packages at once, must look at output to determine the erroneous package name
wrong_package_name = re.search(r"Could not find package (.*)\.", command.output).group(1)
offending_script_param = wrong_package_name if (wrong_package_name in command.script_parts) else re.findall(
r"{}:[^ ]+".format(wrong_package_name), command.script)[0]
version_constraint = offending_script_param[len(wrong_package_name):]
one_suggestion_only = 'did you mean this?' in command.output.lower()
if one_suggestion_only:
# wrong regex??
new_cmd = re.findall(r'Did you mean this\?[^\n]*\n\s*([^\n]*)', command.output)
return replace_argument(command.script, offending_script_param, new_cmd[0].strip() + version_constraint)
else:
# there are multiple suggestions
# trim output text to make it more digestable by regex
trim_start_index = command.output.find("Did you mean one of these?")
short_output = command.output[trim_start_index:]
stripped_lines = [line.strip() for line in short_output.split("\n")]
# each of the suggested packages can be found from index 1 to the first occurrence of a blank string
try:
end_index = stripped_lines.index('')
except ValueError:
end_index = None
suggested_commands = stripped_lines[1:end_index]
return [
replace_argument(command.script, offending_script_param, cmd + version_constraint)
for cmd in suggested_commands
]
|
37,804 | def get_python_configurations(build_selector: BuildSelector) -> List[PythonConfiguration]:
python_configurations = [
# CPython
PythonConfiguration(version='2.7', identifier='cp27-macosx_x86_64', url='https://www.python.org/ftp/python/2.7.18/python-2.7.18-macosx10.9.pkg'),
# TODO: figure out what's going on in CPython 3.5 on macOS 11. or, remove it :)
# PythonConfiguration(version='3.5', identifier='cp35-macosx_x86_64', url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-macosx10.6.pkg'),
PythonConfiguration(version='3.6', identifier='cp36-macosx_x86_64', url='https://www.python.org/ftp/python/3.6.8/python-3.6.8-macosx10.9.pkg'),
PythonConfiguration(version='3.7', identifier='cp37-macosx_x86_64', url='https://www.python.org/ftp/python/3.7.8/python-3.7.8-macosx10.9.pkg'),
PythonConfiguration(version='3.8', identifier='cp38-macosx_x86_64', url='https://www.python.org/ftp/python/3.8.4/python-3.8.4-macosx10.9.pkg'),
# TODO: Find some better way to select Universal2 vs. regular (note that this works on 10.9+, regardless of the name)
PythonConfiguration(version='3.9', identifier='cp39-macosx_x86_64', url='https://www.python.org/ftp/python/3.9.1/python-3.9.1-macos11.0.pkg'),
PythonConfiguration(version='3.9', identifier='cp39-macosx_universal2', url='https://www.python.org/ftp/python/3.9.1/python-3.9.1-macos11.0.pkg'),
PythonConfiguration(version='3.9', identifier='cp39-macosx_arm64', url='https://www.python.org/ftp/python/3.9.1/python-3.9.1-macos11.0.pkg'),
# PyPy
# TODO: may not support 11.0 yet
# PythonConfiguration(version='2.7', identifier='pp27-macosx_x86_64', url='https://downloads.python.org/pypy/pypy2.7-v7.3.3-osx64.tar.bz2'),
PythonConfiguration(version='3.6', identifier='pp36-macosx_x86_64', url='https://downloads.python.org/pypy/pypy3.6-v7.3.3-osx64.tar.bz2'),
PythonConfiguration(version='3.7', identifier='pp37-macosx_x86_64', url='https://downloads.python.org/pypy/pypy3.7-v7.3.3-osx64.tar.bz2'),
]
# skip builds as required
return [c for c in python_configurations if build_selector(c.identifier)]
| def get_python_configurations(build_selector: BuildSelector) -> List[PythonConfiguration]:
python_configurations = [
# CPython
PythonConfiguration(version='2.7', identifier='cp27-macosx_x86_64', url='https://www.python.org/ftp/python/2.7.18/python-2.7.18-macosx10.9.pkg'),
# TODO: figure out what's going on in CPython 3.5 on macOS 11. or, remove it :)
# PythonConfiguration(version='3.5', identifier='cp35-macosx_x86_64', url='https://www.python.org/ftp/python/3.5.4/python-3.5.4-macosx10.6.pkg'),
PythonConfiguration(version='3.6', identifier='cp36-macosx_x86_64', url='https://www.python.org/ftp/python/3.6.8/python-3.6.8-macosx10.9.pkg'),
PythonConfiguration(version='3.7', identifier='cp37-macosx_x86_64', url='https://www.python.org/ftp/python/3.7.8/python-3.7.8-macosx10.9.pkg'),
PythonConfiguration(version='3.8', identifier='cp38-macosx_x86_64', url='https://www.python.org/ftp/python/3.8.4/python-3.8.4-macosx10.9.pkg'),
# TODO: Find some better way to select Universal2 vs. regular (note that this works on 10.9+, regardless of the name)
PythonConfiguration(version='3.9', identifier='cp39-macosx_x86_64', url='https://www.python.org/ftp/python/3.9.1/python-3.9.1-macos11.0.pkg'),
PythonConfiguration(version='3.9', identifier='cp39-macos_universal2', url='https://www.python.org/ftp/python/3.9.1/python-3.9.1-macos11.0.pkg'),
PythonConfiguration(version='3.9', identifier='cp39-macosx_arm64', url='https://www.python.org/ftp/python/3.9.1/python-3.9.1-macos11.0.pkg'),
# PyPy
# TODO: may not support 11.0 yet
# PythonConfiguration(version='2.7', identifier='pp27-macosx_x86_64', url='https://downloads.python.org/pypy/pypy2.7-v7.3.3-osx64.tar.bz2'),
PythonConfiguration(version='3.6', identifier='pp36-macosx_x86_64', url='https://downloads.python.org/pypy/pypy3.6-v7.3.3-osx64.tar.bz2'),
PythonConfiguration(version='3.7', identifier='pp37-macosx_x86_64', url='https://downloads.python.org/pypy/pypy3.7-v7.3.3-osx64.tar.bz2'),
]
# skip builds as required
return [c for c in python_configurations if build_selector(c.identifier)]
|
36,277 | def measure_observables(qc: QuantumComputer, tomo_experiment: TomographyExperiment,
n_shots: int = 10000,
progress_callback: Optional[Callable[[int, int], None]] = None,
active_reset = False,
symmetrize_readout: Optional[str] = 'exhaustive',
calibrate_readout: Optional[str] = 'plus-eig',
readout_symmetrize: Optional[str] = None,
show_progress_bar: bool = False):
"""
Measure all the observables in a TomographyExperiment.
:param qc: A QuantumComputer which can run quantum programs
:param tomo_experiment: A suite of tomographic observables to measure
:param n_shots: The number of shots to take per ExperimentSetting
:param progress_callback: If not None, this function is called each time a group of
settings is run with arguments ``f(i, len(tomo_experiment)`` such that the progress
is ``i / len(tomo_experiment)``.
:param active_reset: Whether to actively reset qubits instead of waiting several
times the coherence length for qubits to decay to |0> naturally. Setting this
to True is much faster but there is a ~1% error per qubit in the reset operation.
Thermal noise from "traditional" reset is not routinely characterized but is of the same
order.
:param symmetrize_readout: Method used to symmetrize the readout errors, i.e. set
p(0|1) = p(1|0). For uncorrelated readout errors, this can be achieved by randomly
selecting between the POVMs {X.D1.X, X.D0.X} and {D0, D1} (where both D0 and D1 are
diagonal). However, here we currently support exhaustive symmetrization and loop through
all possible 2^n POVMs {X/I . POVM . X/I}^n, and obtain symmetrization more generally,
i.e. set p(00|00) = p(01|01) = .. = p(11|11), as well as p(00|01) = p(01|00) etc. If this
is None, no symmetrization is performed. The exhaustive method can be specified by setting
this variable to 'exhaustive' (default value). Set to `None` if no symmetrization is
desired.
:param calibrate_readout: Method used to calibrate the readout results. Currently, the only
method supported is normalizing against the operator's expectation value in its +1
eigenstate, which can be specified by setting this variable to 'plus-eig' (default value).
The preceding symmetrization and this step together yield a more accurate estimation of the observable. Set to `None` if no calibration is desired.
:param show_progress_bar: displays a progress bar via tqdm if true.
"""
if readout_symmetrize is not None:
warnings.warn("'readout_symmetrize' has been renamed to 'symmetrize_readout'",
DeprecationWarning)
symmetrize_readout = readout_symmetrize
# calibration readout only works with symmetrization turned on
if calibrate_readout is not None and symmetrize_readout is None:
raise ValueError("Readout calibration only works with readout symmetrization turned on")
# generate programs for each group of simultaneous settings.
programs, meas_qubits = generate_experiment_programs(tomo_experiment, active_reset)
# Outer loop over a collection of grouped settings for which we can simultaneously
# estimate.
for i, (prog, qubits, settings) in enumerate(zip(tqdm(programs, disable=not show_progress_bar),
meas_qubits, tomo_experiment)):
if symmetrize_readout == 'exhaustive' and len(qubits) > 0:
bitstrings, d_qub_idx = _exhaustive_symmetrization(qc, qubits, n_shots, prog)
elif symmetrize_readout is None and len(qubits) > 0:
total_prog_no_symm = prog.copy()
ro = total_prog_no_symm.declare('ro', 'BIT', len(qubits))
d_qub_idx = {}
for j, q in enumerate(qubits):
total_prog_no_symm += MEASURE(q, ro[j])
# Keep track of qubit-classical register mapping via dict
d_qub_idx[q] = j
total_prog_no_symm.wrap_in_numshots_loop(n_shots)
total_prog_no_symm_native = qc.compiler.quil_to_native_quil(total_prog_no_symm)
total_prog_no_symm_bin = qc.compiler.native_quil_to_executable(total_prog_no_symm_native)
bitstrings = qc.run(total_prog_no_symm_bin)
elif len(qubits) == 0:
# looks like an identity operation
pass
else:
raise ValueError("Readout symmetrization method must be either 'exhaustive' or None")
if progress_callback is not None:
progress_callback(i, len(tomo_experiment))
# 3. Post-process
# Inner loop over the grouped settings. They only differ in which qubits' measurements
# we include in the post-processing. For example, if `settings` is Z1, Z2, Z1Z2 and we
# measure (n_shots, n_qubits=2) obs_strings then the full operator value involves selecting
# either the first column, second column, or both and multiplying along the row.
for setting in settings:
# 3.1 Get the term's coefficient so we can multiply it in later.
coeff = complex(setting.out_operator.coefficient)
if not np.isclose(coeff.imag, 0):
raise ValueError(f"{setting}'s out_operator has a complex coefficient.")
coeff = coeff.real
# 3.2 Special case for measuring the "identity" operator, which doesn't make much
# sense but should happen perfectly.
if is_identity(setting.out_operator):
yield ExperimentResult(
setting=setting,
expectation=coeff,
std_err=0.0,
total_counts=n_shots,
)
continue
# 3.3 Obtain statistics from result of experiment
obs_mean, obs_var = _stats_from_measurements(bitstrings, d_qub_idx, setting, n_shots, coeff)
if calibrate_readout == 'plus-eig':
# 4 Readout calibration
# 4.1 Obtain calibration program
calibr_prog = _calibration_program(qc, tomo_experiment, setting)
# 4.2 Perform symmetrization on the calibration program
if symmetrize_readout == 'exhaustive':
qubs_calibr = setting.out_operator.get_qubits()
calibr_shots = n_shots
calibr_results, d_calibr_qub_idx = _exhaustive_symmetrization(qc, qubs_calibr, calibr_shots, calibr_prog)
else:
raise ValueError("Readout symmetrization method must be either 'exhaustive' or None")
# 4.3 Obtain statistics from the measurement process
obs_calibr_mean, obs_calibr_var = _stats_from_measurements(calibr_results, d_calibr_qub_idx, setting, calibr_shots)
# 4.3 Calibrate the readout results
corrected_mean = obs_mean / obs_calibr_mean
corrected_var = ratio_variance(obs_mean, obs_var, obs_calibr_mean, obs_calibr_var)
yield ExperimentResult(
setting=setting,
expectation=corrected_mean.item(),
std_err=np.sqrt(corrected_var).item(),
total_counts=n_shots,
raw_expectation=obs_mean.item(),
raw_std_err=np.sqrt(obs_var).item(),
calibration_expectation=obs_calibr_mean.item(),
calibration_std_err=np.sqrt(obs_calibr_var).item(),
calibration_counts=calibr_shots,
)
elif calibrate_readout is None:
# No calibration
yield ExperimentResult(
setting=setting,
expectation=obs_mean.item(),
std_err=np.sqrt(obs_var).item(),
total_counts=n_shots,
)
else:
raise ValueError("Calibration readout method must be either 'plus-eig' or None")
| def measure_observables(qc: QuantumComputer, tomo_experiment: TomographyExperiment,
n_shots: int = 10000,
progress_callback: Optional[Callable[[int, int], None]] = None,
active_reset = False,
symmetrize_readout: Optional[str] = 'exhaustive',
calibrate_readout: Optional[str] = 'plus-eig',
readout_symmetrize: Optional[str] = None,
show_progress_bar: bool = False):
"""
Measure all the observables in a TomographyExperiment.
:param qc: A QuantumComputer which can run quantum programs
:param tomo_experiment: A suite of tomographic observables to measure
:param n_shots: The number of shots to take per ExperimentSetting
:param progress_callback: If not None, this function is called each time a group of
settings is run with arguments ``f(i, len(tomo_experiment)`` such that the progress
is ``i / len(tomo_experiment)``.
:param active_reset: Whether to actively reset qubits instead of waiting several
times the coherence length for qubits to decay to |0> naturally. Setting this
to True is much faster but there is a ~1% error per qubit in the reset operation.
Thermal noise from "traditional" reset is not routinely characterized but is of the same
order.
:param symmetrize_readout: Method used to symmetrize the readout errors, i.e. set
p(0|1) = p(1|0). For uncorrelated readout errors, this can be achieved by randomly
selecting between the POVMs {X.D1.X, X.D0.X} and {D0, D1} (where both D0 and D1 are
diagonal). However, here we currently support exhaustive symmetrization and loop through
all possible 2^n POVMs {X/I . POVM . X/I}^n, and obtain symmetrization more generally,
i.e. set p(00|00) = p(01|01) = .. = p(11|11), as well as p(00|01) = p(01|00) etc. If this
is None, no symmetrization is performed. The exhaustive method can be specified by setting
this variable to 'exhaustive' (default value). Set to `None` if no symmetrization is
desired.
:param calibrate_readout: Method used to calibrate the readout results. Currently, the only
method supported is normalizing against the operator's expectation value in its +1
eigenstate, which can be specified by setting this variable to 'plus-eig' (default value).
The preceding symmetrization and this step together yield a more accurate estimation of the observable. Set to `None` if no calibration is desired.
:param show_progress_bar: displays a progress bar via tqdm if true.
"""
if readout_symmetrize is not None:
warnings.warn("'readout_symmetrize' has been renamed to 'symmetrize_readout'",
DeprecationWarning)
symmetrize_readout = readout_symmetrize
# calibration readout only works with symmetrization turned on
if calibrate_readout is not None and symmetrize_readout is None:
raise ValueError("Readout calibration only works with readout symmetrization turned on")
# generate programs for each group of simultaneous settings.
programs, meas_qubits = generate_experiment_programs(tomo_experiment, active_reset)
# Outer loop over a collection of grouped settings for which we can simultaneously
# estimate.
for i, (prog, qubits, settings) in enumerate(zip(tqdm(programs, disable=not show_progress_bar),
meas_qubits, tomo_experiment)):
if symmetrize_readout == 'exhaustive' and len(qubits) > 0:
bitstrings, d_qub_idx = _exhaustive_symmetrization(qc, qubits, n_shots, prog)
elif symmetrize_readout is None and len(qubits) > 0:
total_prog_no_symm = prog.copy()
ro = total_prog_no_symm.declare('ro', 'BIT', len(qubits))
d_qub_idx = {}
for j, q in enumerate(qubits):
total_prog_no_symm += MEASURE(q, ro[j])
# Keep track of qubit-classical register mapping via dict
d_qub_idx[q] = j
total_prog_no_symm.wrap_in_numshots_loop(n_shots)
total_prog_no_symm_native = qc.compiler.quil_to_native_quil(total_prog_no_symm)
total_prog_no_symm_bin = qc.compiler.native_quil_to_executable(total_prog_no_symm_native)
bitstrings = qc.run(total_prog_no_symm_bin)
elif len(qubits) == 0:
# looks like an identity operation
pass
else:
raise ValueError("Readout symmetrization method must be either 'exhaustive' or None")
if progress_callback is not None:
progress_callback(i, len(tomo_experiment))
# 3. Post-process
# Inner loop over the grouped settings. They only differ in which qubits' measurements
# we include in the post-processing. For example, if `settings` is Z1, Z2, Z1Z2 and we
# measure (n_shots, n_qubits=2) obs_strings then the full operator value involves selecting
# either the first column, second column, or both and multiplying along the row.
for setting in settings:
# 3.1 Get the term's coefficient so we can multiply it in later.
coeff = complex(setting.out_operator.coefficient)
if not np.isclose(coeff.imag, 0):
raise ValueError(f"{setting}'s out_operator has a complex coefficient.")
coeff = coeff.real
# 3.2 Special case for measuring the "identity" operator, which doesn't make much
# sense but should happen perfectly.
if is_identity(setting.out_operator):
yield ExperimentResult(
setting=setting,
expectation=coeff,
std_err=0.0,
total_counts=n_shots,
)
continue
# 3.3 Obtain statistics from result of experiment
obs_mean, obs_var = _stats_from_measurements(bitstrings, d_qub_idx, setting, n_shots, coeff)
if calibrate_readout == 'plus-eig':
# 4 Readout calibration
# 4.1 Obtain calibration program
calibr_prog = _calibration_program(qc, tomo_experiment, setting)
# 4.2 Perform symmetrization on the calibration program
if symmetrize_readout == 'exhaustive':
qubs_calibr = setting.out_operator.get_qubits()
calibr_shots = n_shots
calibr_results, d_calibr_qub_idx = _exhaustive_symmetrization(qc, qubs_calibr, calibr_shots, calibr_prog)
else:
raise ValueError("Readout symmetrization method must be either 'exhaustive' or None")
# 4.3 Obtain statistics from the measurement process
obs_calibr_mean, obs_calibr_var = _stats_from_measurements(calibr_results, d_calibr_qub_idx, setting, calibr_shots)
# 4.3 Calibrate the readout results
corrected_mean = obs_mean / obs_calibr_mean
corrected_var = ratio_variance(obs_mean, obs_var, obs_calibr_mean, obs_calibr_var)
yield ExperimentResult(
setting=setting,
expectation=corrected_mean.item(),
std_err=np.sqrt(corrected_var).item(),
total_counts=n_shots,
raw_expectation=obs_mean.item(),
raw_std_err=np.sqrt(obs_var).item(),
calibration_expectation=obs_calibr_mean.item(),
calibration_std_err=np.sqrt(obs_calibr_var).item(),
calibration_counts=calibr_shots,
)
elif calibrate_readout is None:
# No calibration
yield ExperimentResult(
setting=setting,
expectation=obs_mean.item(),
std_err=np.sqrt(obs_var).item(),
total_counts=n_shots,
)
else:
raise ValueError("Calibration readout method must be either 'plus-eig' or None")
|
6,074 | def getQueuesResolved(siteDict):
"""
Get the list of queue descriptions merging site/ce/queue parameters and adding some
derived parameters.
:param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method
:return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g. for SiteDirector
"""
queueDict = {}
for site in siteDict:
for ce in siteDict[site]:
ceDict = siteDict[site][ce]
qDict = ceDict.pop('Queues')
for queue in qDict:
queueName = '%s_%s' % (ce, queue)
queueDict[queueName] = qDict[queue]
queueDict[queueName] = qDict[queue]
queueDict[queueName]['Queue'] = queue
queueDict[queueName]['Site'] = site
# Evaluate the CPU limit of the queue according to the Glue convention
# To Do: should be a utility
if "maxCPUTime" in queueDict[queueName] and \
"SI00" in queueDict[queueName]:
maxCPUTime = float(queueDict[queueName]['maxCPUTime'])
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
si00 = float(queueDict[queueName]['SI00'])
queueCPUTime = 60. / 250. * maxCPUTime * si00
queueDict[queueName]['CPUTime'] = int(queueCPUTime)
# Tags & RequiredTags defined on the Queue level and on the CE level are concatenated
# This also converts them from a string to a list if required.
for tagFieldName in ('Tag', 'RequiredTag'):
ceTags = ceDict.get(tagFieldName, [])
if isinstance(ceTags, basestring):
ceTags = fromChar(ceTags)
queueTags = queueDict[queueName].get(tagFieldName)
if queueTags and isinstance(queueTags, basestring):
queueTags = fromChar(queueTags)
queueDict[queueName][tagFieldName] = queueTags
if ceTags:
if queueTags:
allTags = list(set(ceTags + queueTags))
queueDict[queueName][tagFieldName] = allTags
else:
queueDict[queueName][tagFieldName] = ceTags
# Some parameters can be defined on the CE level and are inherited by all Queues
for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']:
queueParameter = queueDict[queueName].get(parameter)
ceParameter = ceDict.get(parameter)
if ceParameter or queueParameter:
queueDict[queueName][parameter] = ceParameter if not queueParameter \
else queueParameter
# If we have a multi-core queue add MultiProcessor tag
if queueDict[queueName].get('NumberOfProcessors', 1) > 1:
queueDict[queueName].setdefault('Tag', []).append('MultiProcessor')
queueDict[queueName]['CEName'] = ce
queueDict[queueName]['GridCE'] = ce
queueDict[queueName]['CEType'] = ceDict['CEType']
queueDict[queueName]['GridMiddleware'] = ceDict['CEType']
queueDict[queueName]['QueueName'] = queue
platform = ''
if "Platform" in queueDict[queueName]:
platform = queueDict[queueName]['Platform']
elif "Platform" in ceDict:
platform = ceDict['Platform']
elif "OS" in ceDict:
architecture = ceDict.get('architecture', 'x86_64')
platform = '_'.join([architecture, ceDict['OS']])
queueDict[queueName]['Platform'] = platform
if "Platform" not in queueDict[queueName] and platform:
result = getDIRACPlatform(platform)
if result['OK']:
queueDict[queueName]['Platform'] = result['Value'][0]
return S_OK(queueDict)
| def getQueuesResolved(siteDict):
"""
Get the list of queue descriptions merging site/ce/queue parameters and adding some
derived parameters.
:param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method
:return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g. for SiteDirector
"""
queueDict = {}
for site in siteDict:
for ce in siteDict[site]:
ceDict = siteDict[site][ce]
qDict = ceDict.pop('Queues')
for queue in qDict:
queueName = '%s_%s' % (ce, queue)
queueDict[queueName] = qDict[queue]
queueDict[queueName] = qDict[queue]
queueDict[queueName]['Queue'] = queue
queueDict[queueName]['Site'] = site
# Evaluate the CPU limit of the queue according to the Glue convention
# To Do: should be a utility
if "maxCPUTime" in queueDict[queueName] and \
"SI00" in queueDict[queueName]:
maxCPUTime = float(queueDict[queueName]['maxCPUTime'])
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
si00 = float(queueDict[queueName]['SI00'])
queueCPUTime = 60. / 250. * maxCPUTime * si00
queueDict[queueName]['CPUTime'] = int(queueCPUTime)
# Tags & RequiredTags defined on the Queue level and on the CE level are concatenated
# This also converts them from a string to a list if required.
for tagFieldName in ('Tag', 'RequiredTag'):
ceTags = ceDict.get(tagFieldName, [])
if isinstance(ceTags, six.string_types):
ceTags = fromChar(ceTags)
queueTags = queueDict[queueName].get(tagFieldName)
if queueTags and isinstance(queueTags, basestring):
queueTags = fromChar(queueTags)
queueDict[queueName][tagFieldName] = queueTags
if ceTags:
if queueTags:
allTags = list(set(ceTags + queueTags))
queueDict[queueName][tagFieldName] = allTags
else:
queueDict[queueName][tagFieldName] = ceTags
# Some parameters can be defined on the CE level and are inherited by all Queues
for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']:
queueParameter = queueDict[queueName].get(parameter)
ceParameter = ceDict.get(parameter)
if ceParameter or queueParameter:
queueDict[queueName][parameter] = ceParameter if not queueParameter \
else queueParameter
# If we have a multi-core queue add MultiProcessor tag
if queueDict[queueName].get('NumberOfProcessors', 1) > 1:
queueDict[queueName].setdefault('Tag', []).append('MultiProcessor')
queueDict[queueName]['CEName'] = ce
queueDict[queueName]['GridCE'] = ce
queueDict[queueName]['CEType'] = ceDict['CEType']
queueDict[queueName]['GridMiddleware'] = ceDict['CEType']
queueDict[queueName]['QueueName'] = queue
platform = ''
if "Platform" in queueDict[queueName]:
platform = queueDict[queueName]['Platform']
elif "Platform" in ceDict:
platform = ceDict['Platform']
elif "OS" in ceDict:
architecture = ceDict.get('architecture', 'x86_64')
platform = '_'.join([architecture, ceDict['OS']])
queueDict[queueName]['Platform'] = platform
if "Platform" not in queueDict[queueName] and platform:
result = getDIRACPlatform(platform)
if result['OK']:
queueDict[queueName]['Platform'] = result['Value'][0]
return S_OK(queueDict)
|
13,563 | def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
- `'LM'`: select eigenvalues with largest |v[i]|
- `'SM'`: select eigenvalues with smallest |v[i]|
- `'LR'`: select eigenvalues with largest Re(v[i])
- `'SR'`: select eigenvalues with smallest Re(v[i])
- `'LI'`: select eigenvalues with largest Im(v[i])
- `'SI'`: select eigenvalues with smallest Im(v[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
| def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
- `'LM'`: select eigenvalues with largest |v[i]|
- `'SM'`: select eigenvalues with smallest |v[i]|
- `'LR'`: select eigenvalues with largest Re(v[i])
- `'SR'`: select eigenvalues with smallest Re(v[i])
- `'LI'`: select eigenvalues with largest Im(v[i])
- `'SI'`: select eigenvalues with smallest Im(v[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k += 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
2,666 | def _get_response_values(
estimator,
X,
y_true,
response_method,
pos_label=None,
target_type=None,
):
"""Compute the response values of a classifier or a regressor.
The response values are predictions, one scalar value for each sample in X
that depends on the specific choice of `response_method`.
This helper only accepts multiclass classifiers with the `predict` response
method.
If `estimator` is a binary classifier, also return the label for the
effective positive class.
.. versionadded:: 1.1
Parameters
----------
estimator : estimator instance
Fitted classifier or regressor or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y_true : array-like of shape (n_samples,)
The true label.
response_method : {"predict_proba", "decision_function", "predict"} or \
list of such str
Specifies the response method to use get prediction from an estimator
(i.e. :term:`predict_proba`, :term:`decision_function` or
:term:`predict`). Possible choices are:
- if `str`, it corresponds to the name to the method to return;
- if a list of `str`, it provides the method names in order of
preference. The method returned corresponds to the first method in
the list and which is implemented by `estimator`.
pos_label : str or int, default=None
The class considered as the positive class when computing
the metrics. By default, `estimators.classes_[1]` is
considered as the positive class.
target_type : str, default=None
The type of the target `y` as returned by
:func:`~sklearn.utils.multiclass.type_of_target`. If `None`, the type
will be inferred by calling :func:`~sklearn.utils.multiclass.type_of_target`.
Providing the type of the target could save time by avoid calling the
:func:`~sklearn.utils.multiclass.type_of_target` function.
Returns
-------
y_pred : ndarray of shape (n_samples,)
Target scores calculated from the provided response_method
and `pos_label`.
pos_label : str, int or None
The class considered as the positive class when computing
the metrics. Returns `None` if `estimator` is a regressor.
Raises
------
ValueError
If `pos_label` is not a valid label.
If the shape of `y_pred` is not consistent for binary classifier.
If the response method can be applied to a classifier only and
`estimator` is a regressor.
"""
from sklearn.base import is_classifier # noqa
if is_classifier(estimator):
if target_type is None:
target_type = type_of_target(y_true)
prediction_method = _check_response_method(estimator, response_method)
y_pred = prediction_method(X)
classes = estimator.classes_
if pos_label is not None and pos_label not in classes.tolist():
raise ValueError(
f"pos_label={pos_label} is not a valid label: It should be "
f"one of {classes}"
)
elif pos_label is None and target_type == "binary":
pos_label = pos_label if pos_label is not None else classes[-1]
if prediction_method.__name__ == "predict_proba":
if target_type == "binary" and y_pred.shape[1] <= 2:
if y_pred.shape[1] == 2:
col_idx = np.flatnonzero(classes == pos_label)[0]
y_pred = y_pred[:, col_idx]
else:
err_msg = (
f"Got predict_proba of shape {y_pred.shape}, but need "
"classifier with two classes."
)
raise ValueError(err_msg)
elif prediction_method.__name__ == "decision_function":
if target_type == "binary":
if pos_label == classes[0]:
y_pred *= -1
else:
if response_method != "predict":
raise ValueError(f"{estimator.__class__.__name__} should be a classifier")
y_pred, pos_label = estimator.predict(X), None
return y_pred, pos_label
| def _get_response_values(
estimator,
X,
y_true,
response_method,
pos_label=None,
target_type=None,
):
"""Compute the response values of a classifier or a regressor.
The response values are predictions, one scalar value for each sample in X
that depends on the specific choice of `response_method`.
This helper only accepts multiclass classifiers with the `predict` response
method.
If `estimator` is a binary classifier, also return the label for the
effective positive class.
.. versionadded:: 1.1
Parameters
----------
estimator : estimator instance
Fitted classifier or regressor or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier or regressor.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y_true : array-like of shape (n_samples,)
The true label.
response_method : {"predict_proba", "decision_function", "predict"} or \
list of such str
Specifies the response method to use get prediction from an estimator
(i.e. :term:`predict_proba`, :term:`decision_function` or
:term:`predict`). Possible choices are:
- if `str`, it corresponds to the name to the method to return;
- if a list of `str`, it provides the method names in order of
preference. The method returned corresponds to the first method in
the list and which is implemented by `estimator`.
pos_label : str or int, default=None
The class considered as the positive class when computing
the metrics. By default, `estimators.classes_[1]` is
considered as the positive class.
target_type : str, default=None
The type of the target `y` as returned by
:func:`~sklearn.utils.multiclass.type_of_target`. If `None`, the type
will be inferred by calling :func:`~sklearn.utils.multiclass.type_of_target`.
Providing the type of the target could save time by avoid calling the
:func:`~sklearn.utils.multiclass.type_of_target` function.
Returns
-------
y_pred : ndarray of shape (n_samples,)
Target scores calculated from the provided response_method
and `pos_label`.
pos_label : str, int or None
The class considered as the positive class when computing
the metrics. Returns `None` if `estimator` is a regressor.
Raises
------
ValueError
If `pos_label` is not a valid label.
If the shape of `y_pred` is not consistent for binary classifier.
If the response method can be applied to a classifier only and
`estimator` is a regressor.
"""
from sklearn.base import is_classifier # noqa
if is_classifier(estimator):
if target_type is None:
target_type = type_of_target(y_true)
prediction_method = _check_response_method(estimator, response_method)
y_pred = prediction_method(X)
classes = estimator.classes_
if pos_label is not None and pos_label not in classes.tolist():
raise ValueError(
f"pos_label={pos_label} is not a valid label: It should be "
f"one of {classes}"
)
elif pos_label is None and target_type == "binary":
pos_label = pos_label if pos_label is not None else classes[-1]
if prediction_method.__name__ == "predict_proba":
if target_type == "binary" and y_pred.shape[1] <= 2:
if y_pred.shape[1] == 2:
col_idx = np.flatnonzero(classes == pos_label)[0]
y_pred = y_pred[:, col_idx]
else:
err_msg = (
f"Got predict_proba of shape {y_pred.shape}, but need "
"classifier with two classes."
)
raise ValueError(err_msg)
elif prediction_method.__name__ == "decision_function":
if target_type == "binary":
if pos_label == classes[0]:
y_pred *= -1
else:
if response_method != "predict":
raise ValueError(f"{estimator.__class__.__name__} should be a classifier")
y_pred, pos_label = estimator.predict(X), None
return y_pred, pos_label
|
23,637 | def boland(ghi, zenith, datetime_or_doy, min_cos_zenith=0.065, max_zenith=87):
r"""
Estimate DNI and DHI from GHI using the Boland clearness index model.
The Boland model [1]_, [2]_ estimates the diffuse fraction, DF, from global
horizontal irradiance, GHI, through an empirical relationship between DF
and the ratio of GHI to extraterrestrial irradiance or clearness index, kt.
.. math::
\mathit{DF} = \frac{1}{1 + \exp\left(-5 + 8.6 k_t\right)}
where :math:`k_t` is the clearness index.
Parameters
----------
ghi: numeric
Global horizontal irradiance in W/m^2.
zenith: numeric
True (not refraction-corrected) zenith angles in decimal degrees.
datetime_or_doy : int, float, array, pd.DatetimeIndex
Day of year or array of days of year e.g.
pd.DatetimeIndex.dayofyear, or pd.DatetimeIndex.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
data : OrderedDict or DataFrame
Contains the following keys/columns:
* ``dni``: the modeled direct normal irradiance in W/m^2.
* ``dhi``: the modeled diffuse horizontal irradiance in
W/m^2.
* ``kt``: Ratio of global to extraterrestrial irradiance
on a horizontal plane.
References
----------
.. [1] John Boland, Lynne Scott, and Mark Luther, Modelling the diffuse
fraction of global solar radiation on a horizontal surface,
Environmetrics 12(2), pp 103-116, 2001,
:doi:`10.1002/1099-095X(200103)12:2%3C103::AID-ENV447%3E3.0.CO;2-2`
.. [2] J. Boland, B. Ridley (2008) Models of Diffuse Solar Fraction. In:
Badescu V. (eds) Modeling Solar Radiation at the Earth’s Surface.
Springer, Berlin, Heidelberg. :doi:`10.1007/978-3-540-77455-6_8`
See also
--------
dirint
disc
erbs
"""
dni_extra = get_extra_radiation(datetime_or_doy)
kt = clearness_index(ghi, zenith, dni_extra, min_cos_zenith=min_cos_zenith,
max_clearness_index=1)
# Boland equation
df = 1.0 / (1.0 + np.exp(-5.0 + 8.6 * kt))
# NOTE: [1] has different coefficients, for different time intervals
# 15-min: df = 1 / (1 + exp(8.645 * (kt - 0.613)))
# 1-hour: df = 1 / (1 + exp(7.997 * (kt - 0.586)))
dhi = df * ghi
dni = (ghi - dhi) / tools.cosd(zenith)
bad_values = (zenith > max_zenith) | (ghi < 0) | (dni < 0)
dni = np.where(bad_values, 0, dni)
# ensure that closure relationship remains valid
dhi = np.where(bad_values, ghi, dhi)
data = OrderedDict()
data['dni'] = dni
data['dhi'] = dhi
data['kt'] = kt
if isinstance(datetime_or_doy, pd.DatetimeIndex):
data = pd.DataFrame(data, index=datetime_or_doy)
return data
| def boland(ghi, zenith, datetime_or_doy, min_cos_zenith=0.065, max_zenith=87):
r"""
Estimate DNI and DHI from GHI using the Boland clearness index model.
The Boland model [1]_, [2]_ estimates the diffuse fraction, DF, from global
horizontal irradiance, GHI, through an empirical relationship between DF
and the ratio of GHI to extraterrestrial irradiance or clearness index, kt.
.. math::
\mathit{DF} = \frac{1}{1 + \exp\left(-5 + 8.6 k_t\right)}
where :math:`k_t` is the clearness index.
Parameters
----------
ghi: numeric
Global horizontal irradiance in W/m^2.
zenith: numeric
True (not refraction-corrected) zenith angles in decimal degrees.
datetime_or_doy : int, float, array, DatetimeIndex
Day of year or array of days of year e.g.
pd.DatetimeIndex.dayofyear, or pd.DatetimeIndex.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
data : OrderedDict or DataFrame
Contains the following keys/columns:
* ``dni``: the modeled direct normal irradiance in W/m^2.
* ``dhi``: the modeled diffuse horizontal irradiance in
W/m^2.
* ``kt``: Ratio of global to extraterrestrial irradiance
on a horizontal plane.
References
----------
.. [1] John Boland, Lynne Scott, and Mark Luther, Modelling the diffuse
fraction of global solar radiation on a horizontal surface,
Environmetrics 12(2), pp 103-116, 2001,
:doi:`10.1002/1099-095X(200103)12:2%3C103::AID-ENV447%3E3.0.CO;2-2`
.. [2] J. Boland, B. Ridley (2008) Models of Diffuse Solar Fraction. In:
Badescu V. (eds) Modeling Solar Radiation at the Earth’s Surface.
Springer, Berlin, Heidelberg. :doi:`10.1007/978-3-540-77455-6_8`
See also
--------
dirint
disc
erbs
"""
dni_extra = get_extra_radiation(datetime_or_doy)
kt = clearness_index(ghi, zenith, dni_extra, min_cos_zenith=min_cos_zenith,
max_clearness_index=1)
# Boland equation
df = 1.0 / (1.0 + np.exp(-5.0 + 8.6 * kt))
# NOTE: [1] has different coefficients, for different time intervals
# 15-min: df = 1 / (1 + exp(8.645 * (kt - 0.613)))
# 1-hour: df = 1 / (1 + exp(7.997 * (kt - 0.586)))
dhi = df * ghi
dni = (ghi - dhi) / tools.cosd(zenith)
bad_values = (zenith > max_zenith) | (ghi < 0) | (dni < 0)
dni = np.where(bad_values, 0, dni)
# ensure that closure relationship remains valid
dhi = np.where(bad_values, ghi, dhi)
data = OrderedDict()
data['dni'] = dni
data['dhi'] = dhi
data['kt'] = kt
if isinstance(datetime_or_doy, pd.DatetimeIndex):
data = pd.DataFrame(data, index=datetime_or_doy)
return data
|
42,089 | def updates_state(f: Callable[..., Any]) -> Callable[..., Any]:
"""Method decorator to fetch updated trial state from rank 0 after f is run.
This decorator ensures trial properties (params, distributions, etc.) on all distributed
processes are up-to-date with the wrapped trial stored on rank 0.
It should be applied to all Trial methods that update property values.
"""
@functools.wraps(f)
def wrapped(self: Any, *args, **kwargs) -> Any:
def state() -> Sequence:
assert self._delegate is not None
return (
self._delegate.number,
self._delegate.params,
self._delegate.distributions,
self._delegate.user_attrs,
self._delegate.system_attrs,
self._delegate.datetime_start,
)
try:
return f(self, *args, **kwargs)
finally:
(
self._number,
self._params,
self._distributions,
self._user_attrs,
self._system_attrs,
self._datetime_start,
) = self._call_and_communicate_obj(state)
return wrapped
| def updates_state(f: Callable[..., Any]) -> Callable[..., Any]:
"""Method decorator to fetch updated trial state from rank 0 after f is run.
This decorator ensures trial properties (params, distributions, etc.) on all distributed
processes are up-to-date with the wrapped trial stored on rank 0.
It should be applied to all :class:`~optuna.integration.TorchDistributedTrial` methods that update property values.
"""
@functools.wraps(f)
def wrapped(self: Any, *args, **kwargs) -> Any:
def state() -> Sequence:
assert self._delegate is not None
return (
self._delegate.number,
self._delegate.params,
self._delegate.distributions,
self._delegate.user_attrs,
self._delegate.system_attrs,
self._delegate.datetime_start,
)
try:
return f(self, *args, **kwargs)
finally:
(
self._number,
self._params,
self._distributions,
self._user_attrs,
self._system_attrs,
self._datetime_start,
) = self._call_and_communicate_obj(state)
return wrapped
|
42,931 | def _node_coords(graph: nx.Graph, l: dict) -> Tuple:
""" Provides the coordinates for the graph nodes when given an input graph layout.
Args:
graph (nx.Graph): input graph
l (dict): dictionary of nodes and their respective coordinates
Returns:
Tuple: x and y coordinates for each node
"""
n_x = []
n_y = []
for n in graph.nodes():
n_x.append(l[n][0])
n_y.append(l[n][1])
return {"x": n_x, "y": n_y}
| def _node_coords(graph: nx.Graph, l: dict) -> dict:
""" Provides the coordinates for the graph nodes when given an input graph layout.
Args:
graph (nx.Graph): input graph
l (dict): dictionary of nodes and their respective coordinates
Returns:
Tuple: x and y coordinates for each node
"""
n_x = []
n_y = []
for n in graph.nodes():
n_x.append(l[n][0])
n_y.append(l[n][1])
return {"x": n_x, "y": n_y}
|
41,554 | def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", dest="model", required=True, type=str,
help="Path to .pt model.", metavar=Metavar.str)
parser.add_argument("-d", "--dimension", dest="dimension", required=True,
type=int, help="Input dimension (2 for 2D inputs, 3 for 3D inputs).",
metavar=Metavar.int)
parser.add_argument("-n", "--n_channels", dest="n_channels", default=1, type=int,
help="Number of input channels of the model.",
metavar=Metavar.int)
parser.add_argument("-g", "--gpu", dest="gpu", default=0, type=str,
help="GPU number if available.", metavar=Metavar.str)
return parser
| def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", dest="model", required=True, type=str,
help="Path to .pt model.", metavar=Metavar.str)
parser.add_argument("-d", "--dimension", dest="dimension", required=True,
type=int, help="Input dimension (2 for 2D inputs, 3 for 3D inputs).",
metavar=Metavar.int)
parser.add_argument("-n", "--n_channels", dest="n_channels", default=1, type=int,
help="Number of input channels of the model.",
metavar=Metavar.int)
parser.add_argument("-g", "--gpu", dest="gpu", default=0, type=str,
help="GPU number if available.", metavar=Metavar.int)
return parser
|
41,515 | def fixed_poi_fit(
poi_val, data, pdf, init_pars=None, par_bounds=None, fixed_params=None, **kwargs
):
r"""
Run a maximum likelihood fit with the POI value fixed.
This is done by minimizing the objective function of :func:`~pyhf.infer.mle.twice_nll`
of the model parameters given the observed data, for a given fixed value of :math:`\mu`.
This is used to produce the constrained maximal likelihood for the given :math:`\mu`
,:math:`L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)`, in the profile
likelihood ratio in Equation (7) in :xref:`arXiv:1007.1727`
.. math::
\lambda\left(\mu\right) = \frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}
.. note::
:func:`twice_nll` is the objective function given to the optimizer and
is returned evaluated at the best fit model parameters when the optional
kwarg ``return_fitted_val`` is ``True``.
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
>>> test_poi = 1.0
>>> bestfit_pars, twice_nll = pyhf.infer.mle.fixed_poi_fit(
... test_poi, data, model, return_fitted_val=True
... )
>>> bestfit_pars
array([1. , 0.97224597, 0.87553894])
>>> twice_nll
array(28.92218013)
>>> -2 * model.logpdf(bestfit_pars, data) == twice_nll
array([ True])
Args:
data: The data
pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
init_pars (:obj:`list`): Values to initialize the model parameters at for the fit
par_bounds (:obj:`list` of :obj:`list`\s or :obj:`tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
fixed_params (:obj:`list`): Parameters to be held constant in the fit.
kwargs: Keyword arguments passed through to the optimizer API
Returns:
See optimizer API
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required to fit with a fixed POI.'
)
init_pars = [*(init_pars or pdf.config.suggested_init())]
fixed_params = [*(fixed_params or pdf.config.suggested_fixed())]
init_pars[pdf.config.poi_index] = poi_val
fixed_params[pdf.config.poi_index] = True
return fit(data, pdf, init_pars, par_bounds, fixed_params, **kwargs)
| def fixed_poi_fit(
poi_val, data, pdf, init_pars=None, par_bounds=None, fixed_params=None, **kwargs
):
r"""
Run a maximum likelihood fit with the POI value fixed.
This is done by minimizing the objective function of :func:`~pyhf.infer.mle.twice_nll`
of the model parameters given the observed data, for a given fixed value of :math:`\mu`.
This is used to produce the constrained maximal likelihood for the given :math:`\mu`
:math:`L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)`, in the profile
likelihood ratio in Equation (7) in :xref:`arXiv:1007.1727`
.. math::
\lambda\left(\mu\right) = \frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}
.. note::
:func:`twice_nll` is the objective function given to the optimizer and
is returned evaluated at the best fit model parameters when the optional
kwarg ``return_fitted_val`` is ``True``.
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
>>> test_poi = 1.0
>>> bestfit_pars, twice_nll = pyhf.infer.mle.fixed_poi_fit(
... test_poi, data, model, return_fitted_val=True
... )
>>> bestfit_pars
array([1. , 0.97224597, 0.87553894])
>>> twice_nll
array(28.92218013)
>>> -2 * model.logpdf(bestfit_pars, data) == twice_nll
array([ True])
Args:
data: The data
pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
init_pars (:obj:`list`): Values to initialize the model parameters at for the fit
par_bounds (:obj:`list` of :obj:`list`\s or :obj:`tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
fixed_params (:obj:`list`): Parameters to be held constant in the fit.
kwargs: Keyword arguments passed through to the optimizer API
Returns:
See optimizer API
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required to fit with a fixed POI.'
)
init_pars = [*(init_pars or pdf.config.suggested_init())]
fixed_params = [*(fixed_params or pdf.config.suggested_fixed())]
init_pars[pdf.config.poi_index] = poi_val
fixed_params[pdf.config.poi_index] = True
return fit(data, pdf, init_pars, par_bounds, fixed_params, **kwargs)
|
55,025 | def pauli_mult(pauli_1, pauli_2, wire_map=None):
"""Multiply two Pauli words together.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations.
Args:
pauli_1 (qml.Operation): A Pauli word.
pauli_2 (qml.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
(qml.Operation): The product of pauli_1 and pauli_2 as a Pauli word
(ignoring the global phase).
**Example**
This function enables multiplication of Pauli group elements at the level of
Pauli words, rather than matrices. For example,
.. code-block:: python
import pennylane as qml
from pennylane.grouping.pauli_group import pauli_mult
pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
product = pauli_mult(pauli_1, pauli_2)
print(product)
will yield ``qml.PauliZ(0)``.
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Check if pauli_1 and pauli_2 are the same; if so, the result is the Identity
if are_identical_pauli_words(pauli_1, pauli_2):
first_wire = list(wire_map.keys())[0]
return Identity(first_wire)
# Compute binary symplectic representations
pauli_1_binary = pauli_to_binary(pauli_1, wire_map=wire_map)
pauli_2_binary = pauli_to_binary(pauli_2, wire_map=wire_map)
bin_symp_1 = np.array([int(x) for x in pauli_1_binary])
bin_symp_2 = np.array([int(x) for x in pauli_2_binary])
# Shorthand for bitwise XOR of numpy arrays
pauli_product = bin_symp_1 ^ bin_symp_2
return binary_to_pauli(pauli_product, wire_map=wire_map)
| def pauli_mult(pauli_1, pauli_2, wire_map=None):
"""Multiply two Pauli words together.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations.
Args:
pauli_1 (qml.Operation): A Pauli word.
pauli_2 (qml.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
(qml.Operation): The product of pauli_1 and pauli_2 as a Pauli word
(ignoring the global phase).
**Example**
This function enables multiplication of Pauli group elements at the level of
Pauli words, rather than matrices. For example,
>>> from pennylane.grouping.pauli_group import pauli_mult
>>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
>>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
>>> product = pauli_mult(pauli_1, pauli_2)
>>> print(product)
PauliZ(wires=[0])
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Check if pauli_1 and pauli_2 are the same; if so, the result is the Identity
if are_identical_pauli_words(pauli_1, pauli_2):
first_wire = list(wire_map.keys())[0]
return Identity(first_wire)
# Compute binary symplectic representations
pauli_1_binary = pauli_to_binary(pauli_1, wire_map=wire_map)
pauli_2_binary = pauli_to_binary(pauli_2, wire_map=wire_map)
bin_symp_1 = np.array([int(x) for x in pauli_1_binary])
bin_symp_2 = np.array([int(x) for x in pauli_2_binary])
# Shorthand for bitwise XOR of numpy arrays
pauli_product = bin_symp_1 ^ bin_symp_2
return binary_to_pauli(pauli_product, wire_map=wire_map)
|
31,386 | def bang_domain(client: Client, score_calculator: ScoreCalculator, args: dict) -> CommandResults:
"""
1 API Call for regular
1-4 API Calls for premium subscriptions
"""
domain = args['domain']
raw_response = client.domain(domain)
data = raw_response['data']
attributes = data['attributes']
whois = get_whois(attributes['whois'])
domain_standard = assign_params(
Name=domain,
CreationDate=whois['Creation Date'],
UpdatedDate=whois['Updated Date'],
ExpirationDate=whois['Registry Expiry Date'],
NameServers=whois['Name Server'],
Admin=assign_params(
Name=whois['Admin Organization'],
Email=whois['Admin Email'],
Country=whois['Admin Country'],
),
Registrant=assign_params(
Country=whois['Registrant Country'],
Email=whois['Registrant Email']
),
WHOIS=assign_params(
CreationDate=whois['Creation Date'],
UpdatedDate=whois['Updated Date'],
ExpirationDate=whois['Registry Expiry Date'],
Registrar=assign_params(
Name=whois['Registrar'],
AbuseEmail=whois['Registrar Abuse Contact Email'],
AbusePhone=whois['Registrar Abuse Contact Phone'],
),
Admin=assign_params(
Name=whois['Admin Organization'],
Email=whois['Admin Email']
)
)
)
score = score_calculator.domain_score(domain, raw_response)
if score != Common.DBotScore.BAD and client.is_premium:
score = score_calculator.analyze_premium_domain_score(client, domain, score)
logs = score_calculator.get_logs()
demisto.debug(logs)
if score == Common.DBotScore.BAD:
domain_standard['Malicious'] = {
'Vendor': INTEGRATION_NAME,
'Description': logs
}
context = {
f'{INTEGRATION_ENTRY_CONTEXT}.Domain(val.id && val.id === obj.id) ': data,
Common.Domain.CONTEXT_PATH: domain_standard
}
context.update(
Common.DBotScore(
domain,
DBotScoreType.DOMAIN,
INTEGRATION_NAME,
score,
malicious_description=logs
).to_context()
)
attributes = data['attributes']
return CommandResults(
readable_output=tableToMarkdown(
f'Domain data of {domain}',
{
'last_modified': epoch_to_timestamp(attributes['last_modification_date']),
**data,
**whois,
**attributes
},
headers=[
'id',
'Registrant Country',
'last_modified',
'last_analysis_stats'
],
removeNull=True
),
outputs=context,
raw_response=raw_response
)
| def domain_command(client: Client, score_calculator: ScoreCalculator, args: dict) -> CommandResults:
"""
1 API Call for regular
1-4 API Calls for premium subscriptions
"""
domain = args['domain']
raw_response = client.domain(domain)
data = raw_response['data']
attributes = data['attributes']
whois = get_whois(attributes['whois'])
domain_standard = assign_params(
Name=domain,
CreationDate=whois['Creation Date'],
UpdatedDate=whois['Updated Date'],
ExpirationDate=whois['Registry Expiry Date'],
NameServers=whois['Name Server'],
Admin=assign_params(
Name=whois['Admin Organization'],
Email=whois['Admin Email'],
Country=whois['Admin Country'],
),
Registrant=assign_params(
Country=whois['Registrant Country'],
Email=whois['Registrant Email']
),
WHOIS=assign_params(
CreationDate=whois['Creation Date'],
UpdatedDate=whois['Updated Date'],
ExpirationDate=whois['Registry Expiry Date'],
Registrar=assign_params(
Name=whois['Registrar'],
AbuseEmail=whois['Registrar Abuse Contact Email'],
AbusePhone=whois['Registrar Abuse Contact Phone'],
),
Admin=assign_params(
Name=whois['Admin Organization'],
Email=whois['Admin Email']
)
)
)
score = score_calculator.domain_score(domain, raw_response)
if score != Common.DBotScore.BAD and client.is_premium:
score = score_calculator.analyze_premium_domain_score(client, domain, score)
logs = score_calculator.get_logs()
demisto.debug(logs)
if score == Common.DBotScore.BAD:
domain_standard['Malicious'] = {
'Vendor': INTEGRATION_NAME,
'Description': logs
}
context = {
f'{INTEGRATION_ENTRY_CONTEXT}.Domain(val.id && val.id === obj.id) ': data,
Common.Domain.CONTEXT_PATH: domain_standard
}
context.update(
Common.DBotScore(
domain,
DBotScoreType.DOMAIN,
INTEGRATION_NAME,
score,
malicious_description=logs
).to_context()
)
attributes = data['attributes']
return CommandResults(
readable_output=tableToMarkdown(
f'Domain data of {domain}',
{
'last_modified': epoch_to_timestamp(attributes['last_modification_date']),
**data,
**whois,
**attributes
},
headers=[
'id',
'Registrant Country',
'last_modified',
'last_analysis_stats'
],
removeNull=True
),
outputs=context,
raw_response=raw_response
)
|
32,008 | def get_namespaces(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Get the list of the namespaces.
Implement the command 'prisma-cloud-compute-container-namespace-list'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-container-namespace-list command arguments
Returns:
CommandResults: command-results object.
"""
limit, _ = parse_limit_and_offset_values(limit=args.pop("limit", "50"))
if namespaces := filter_api_response(
api_response=client.get_namespaces(params=assign_params(**args)), limit=limit
):
# when the api returns [""] (a list with empty string), it means that the system does not have any namespaces
if len(namespaces) == 1 and namespaces[0] == "":
namespaces, table = [], "No results found"
else:
table = tableToMarkdown(
name="Namespaces",
t=[{"Namespace": namespace} for namespace in namespaces],
headers=["Namespace"]
)
else:
namespaces, table = [], "No results found"
return CommandResults(
outputs_prefix="PrismaCloudCompute.RadarContainerNamespace",
outputs=namespaces if namespaces else None,
readable_output=table,
raw_response=namespaces
)
| def get_namespaces(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Get the list of the namespaces.
Implement the command 'prisma-cloud-compute-container-namespace-list'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-container-namespace-list command arguments
Returns:
CommandResults: command-results object.
"""
limit, _ = parse_limit_and_offset_values(limit=args.pop("limit", "50"))
if namespaces := filter_api_response(
api_response=client.get_namespaces(params=assign_params(**args)), limit=limit
):
# when the api returns [""] (a list with empty string), it means that the system does not have any namespaces
if len(namespaces) == 1 and namespaces[0] == "":
namespaces, table = [], "No results found"
else:
table = tableToMarkdown(
name="Namespaces",
t=[{"Namespace": namespace} for namespace in namespaces],
headers=["Namespace"]
)
else:
namespaces, table = [], "No results found."
return CommandResults(
outputs_prefix="PrismaCloudCompute.RadarContainerNamespace",
outputs=namespaces if namespaces else None,
readable_output=table,
raw_response=namespaces
)
|
32,057 | def qradar_offense_update_command(client: Client, args: Dict) -> CommandResults:
"""
Updates offense that corresponds to the given offense ID.
possible arguments:
- offense_id (Required): Update offense that corresponds to ID given.
- protected: Whether the offense is protected.
- follow_up: Whether the offense should be marked for follow up.
- status: Status of the offense. One of 'OPEN', 'HIDDEN', 'CLOSED'.
- closing_reason_id: The ID of the reason the offense was closed. full list of closing reason IDs,
full list of closing reason IDs can be retrieved by 'qradar-closing-reasons' command.
- assigned_to: The user whom to assign the offense to.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
offense_id: int = int(args['offense_id'])
protected = args.get('protected')
follow_up = args.get('follow_up')
closing_reason_name = args.get('closing_reason_name')
status = args.get('status')
closing_reason_id = args.get('closing_reason_id')
if status == 'CLOSED' and (not closing_reason_id and not closing_reason_name):
raise DemistoException(
'''Closing reason ID must be provided when closing an offense. Available closing reasons can be achieved
by 'qradar-closing-reasons' command.'''
)
if closing_reason_name:
# if this call fails raise an error and stop command execution
closing_reasons_list = client.closing_reasons_list(include_deleted=True, include_reserved=True)
for closing_reason in closing_reasons_list:
if closing_reason.get('text') == closing_reason_name:
closing_reason_id = closing_reason.get('id')
if not closing_reason_id:
raise DemistoException(f'Could not find closing reason name {closing_reason_name}. Please provide a valid'
' closing reason name. Closing reasons can be retrieved by running the '
'qradar-closing-reasons command.')
assigned_to = args.get('assigned_to')
fields = args.get('fields')
ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None'))
# if this call fails raise an error and stop command execution
response = client.offense_update(offense_id, protected, follow_up, status, closing_reason_id, assigned_to,
fields)
enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich)
final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP)
headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'],
set(OFFENSE_OLD_NEW_NAMES_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('offense Update', final_outputs, headers, removeNull=True),
outputs_prefix='QRadar.Offense',
outputs_key_field='ID',
outputs=final_outputs,
raw_response=response
)
| def qradar_offense_update_command(client: Client, args: Dict) -> CommandResults:
"""
Updates offense that corresponds to the given offense ID.
possible arguments:
- offense_id (Required): Update offense that corresponds to ID given.
- protected: Whether the offense is protected.
- follow_up: Whether the offense should be marked for follow up.
- status: Status of the offense. One of 'OPEN', 'HIDDEN', 'CLOSED'.
- closing_reason_id: The ID of the reason the offense was closed. full list of closing reason IDs,
full list of closing reason IDs can be retrieved by 'qradar-closing-reasons' command.
- assigned_to: The user whom to assign the offense to.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
offense_id: int = int(args['offense_id'])
protected = args.get('protected')
follow_up = args.get('follow_up')
closing_reason_name = args.get('closing_reason_name')
status = args.get('status')
closing_reason_id = args.get('closing_reason_id')
if status == 'CLOSED' and (not closing_reason_id and not closing_reason_name):
raise DemistoException(
'''Closing reason ID must be provided when closing an offense. Available closing reasons can be achieved
by 'qradar-closing-reasons' command.'''
)
if closing_reason_name:
# if this call fails raise an error and stop command execution
closing_reasons_list = client.closing_reasons_list(include_deleted=True, include_reserved=True)
for closing_reason in closing_reasons_list:
if closing_reason.get('text') == closing_reason_name:
closing_reason_id = closing_reason.get('id')
if not closing_reason_id:
raise DemistoException(f'Could not find closing reason name {closing_reason_name}. Please provide a valid'
' closing reason name. Closing reasons can be retrieved by running the '
'qradar-closing-reasons command.')
assigned_to = args.get('assigned_to')
fields = args.get('fields')
ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None'))
# if this call fails, raise an error and stop command execution
response = client.offense_update(offense_id, protected, follow_up, status, closing_reason_id, assigned_to,
fields)
enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich)
final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP)
headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'],
set(OFFENSE_OLD_NEW_NAMES_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('offense Update', final_outputs, headers, removeNull=True),
outputs_prefix='QRadar.Offense',
outputs_key_field='ID',
outputs=final_outputs,
raw_response=response
)
|
41,095 | def get_parser():
"""Construct the parser."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--pretrained",
type=str,
help="Pretrained model."
)
parser.add_argument(
"--toolkit",
type=str,
help="Toolkit for Extracting X-vectors."
)
parser.add_argument(
"--verbose",
type=int,
default=1,
help="Verbosity level."
)
parser.add_argument(
"--device",
type=str,
default='cuda:0',
help="Verbosity level."
)
parser.add_argument(
"in_folder",
type=Path,
help="Path to the input data."
)
parser.add_argument(
"out_folder",
type=Path,
help="Output folder to save the xvectors.",
)
return parser
| def get_parser():
"""Construct the parser."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--pretrained",
type=str,
help="Pretrained model."
)
parser.add_argument(
"--toolkit",
type=str,
help="Toolkit for Extracting X-vectors."
)
parser.add_argument(
"--verbose",
type=int,
default=1,
help="Verbosity level."
)
parser.add_argument(
"--device",
type=str,
default='cuda:0',
help="Verbosity level."
)
parser.add_argument(
"in_folder",
type=Path,
help="Path to the input kaldi data directory."
)
parser.add_argument(
"out_folder",
type=Path,
help="Output folder to save the xvectors.",
)
return parser
|
31,167 | def store_successful_and_failed_packs_in_ci_artifacts(packs_results_file_path: str, stage: str, successful_packs: list,
failed_packs: list, images_data: dict = None):
""" Write the successful and failed packs to the correct section in the packs_results.json file
Args:
packs_results_file_path (str): The path to the pack_results.json file
stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
successful_packs (list): The list of all successful packs
failed_packs (list): The list of all failed packs
images_data (dict): The dict that contains the data of all the images that were uploaded
"""
packs_results = load_json(packs_results_file_path)
packs_results[stage] = dict()
if failed_packs:
failed_packs_dict = {
BucketUploadFlow.FAILED_PACKS: {
pack.name: {
BucketUploadFlow.STATUS: pack.status,
BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
else "False"
} for pack in failed_packs
}
}
packs_results[stage].update(failed_packs_dict)
logging.debug(f"Failed packs {failed_packs_dict}")
if successful_packs:
successful_packs_dict = {
BucketUploadFlow.SUCCESSFUL_PACKS: {
pack.name: {
BucketUploadFlow.STATUS: pack.status,
BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
else "False"
} for pack in successful_packs
}
}
packs_results[stage].update(successful_packs_dict)
logging.debug(f"Successful packs {successful_packs_dict}")
if images_data:
packs_results[stage].update({BucketUploadFlow.IMAGES: images_data})
logging.debug(f"Images data {images_data}")
if packs_results:
json_write(packs_results_file_path, packs_results)
| def store_successful_and_failed_packs_in_ci_artifacts(packs_results_file_path: str, stage: str, successful_packs: list,
failed_packs: list, images_data: dict = None):
""" Write the successful and failed packs to the correct section in the packs_results.json file
Args:
packs_results_file_path (str): The path to the pack_results.json file
stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
successful_packs (list): The list of all successful packs
failed_packs (list): The list of all failed packs
images_data (dict): A dict containing all images that were uploaded for each pack
"""
packs_results = load_json(packs_results_file_path)
packs_results[stage] = dict()
if failed_packs:
failed_packs_dict = {
BucketUploadFlow.FAILED_PACKS: {
pack.name: {
BucketUploadFlow.STATUS: pack.status,
BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
else "False"
} for pack in failed_packs
}
}
packs_results[stage].update(failed_packs_dict)
logging.debug(f"Failed packs {failed_packs_dict}")
if successful_packs:
successful_packs_dict = {
BucketUploadFlow.SUCCESSFUL_PACKS: {
pack.name: {
BucketUploadFlow.STATUS: pack.status,
BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
else "False"
} for pack in successful_packs
}
}
packs_results[stage].update(successful_packs_dict)
logging.debug(f"Successful packs {successful_packs_dict}")
if images_data:
packs_results[stage].update({BucketUploadFlow.IMAGES: images_data})
logging.debug(f"Images data {images_data}")
if packs_results:
json_write(packs_results_file_path, packs_results)
|
45,707 | def forecast(
vil,
velocity,
timesteps,
rainrate=None,
n_cascade_levels=8,
extrap_method="semilagrangian",
ar_order=2,
ar_window_radius=50,
r_vil_window_radius=3,
fft_method="numpy",
apply_rainrate_mask=True,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""Generate a nowcast by using the autoregressive nowcasting using VIL
(ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast.
The key features are:
1) Growth and decay: implemented by using a cascade decomposition and
a multiscale autoregressive integrated ARI(p,1) model. Instead of the
original time series, the ARI model is applied to the differenced one
corresponding to time derivatives.
2) Originally designed for using integrated liquid (VIL) as the input data.
In this case, the rain rate (R) is obtained from VIL via an empirical
relation. This implementation is more general so that the input can be
any two-dimensional precipitation field.
3) The parameters of the ARI model and the R(VIL) relation are allowed to
be spatially variable. The estimation is done using a moving window.
Parameters
----------
vil: array_like
Array of shape (ar_order+2,m,n) containing the input fields ordered by
timestamp from oldest to newest. The inputs are expected to contain VIL
or rain rate. The time steps between the inputs are assumed to be regular.
velocity: array_like
Array of shape (2,m,n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs. All values are required to be finite.
timesteps: int or list
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
rainrate: array_like
Array of shape (m,n) containing the most recently observed rain rate
field. If set to None, no R(VIL) conversion is done and the outputs
are in the same units as the inputs.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
ar_order: int, optional
The order of the autoregressive model to use. The recommended values
are 1 or 2. Using a higher-order model is strongly discouraged because
the stationarity of the AR process cannot be guaranteed.
ar_window_radius: int, optional
The radius of the window to use for determining the parameters of the
autoregressive model. Set to None to disable localization.
r_vil_window_radius: int, optional
The radius of the window to use for determining the R(VIL) relation.
Applicable if rainrate is not None.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
apply_rainrate_mask: bool
Apply mask to prevent producing precipitation to areas where it was not
originally observed. Defaults to True. Disabling this may improve some
verification metrics but increases the number of false alarms. Applicable
if rainrate is None.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if
dask is installed or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool, optional
If True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input VIL/rain rate
fields. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
References
----------
:cite:`PCLH2020`
"""
_check_inputs(vil, rainrate, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
print("Computing ANVIL nowcast:")
print("------------------------")
print("")
print("Inputs:")
print("-------")
print("input dimensions: %dx%d" % (vil.shape[1], vil.shape[2]))
print("")
print("Methods:")
print("--------")
print("extrapolation: %s" % extrap_method)
print("FFT: %s" % fft_method)
print("")
print("Parameters:")
print("-----------")
if isinstance(timesteps, int):
print("number of time steps: %d" % timesteps)
else:
print("time steps: %s" % timesteps)
print("parallel threads: %d" % num_workers)
print("number of cascade levels: %d" % n_cascade_levels)
print("order of the ARI(p,1) model: %d" % ar_order)
if type(ar_window_radius) == int:
print("ARI(p,1) window radius: %d" % ar_window_radius)
else:
print("ARI(p,1) window radius: none")
print("R(VIL) window radius: %d" % r_vil_window_radius)
if measure_time:
starttime_init = time.time()
m, n = vil.shape[1:]
vil = vil.copy()
if rainrate is None and apply_rainrate_mask:
rainrate_mask = vil[-1, :] < 0.1
if rainrate is not None:
# determine the coefficients fields of the relation R=a*VIL+b by
# localized linear regression
r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius)
# transform the input fields to Lagrangian coordinates by extrapolation
extrapolator = extrapolation.get_method(extrap_method)
res = list()
def worker(vil, i):
return (
i,
extrapolator(
vil[i, :],
velocity,
vil.shape[0] - 1 - i,
allow_nonfinite_values=True,
**extrap_kwargs,
)[-1],
)
for i in range(vil.shape[0] - 1):
if not DASK_IMPORTED or num_workers == 1:
vil[i, :, :] = worker(vil, i)[1]
else:
res.append(dask.delayed(worker)(vil, i))
if DASK_IMPORTED and num_workers > 1:
num_workers_ = len(res) if num_workers > len(res) else num_workers
vil_e = dask.compute(*res, num_workers=num_workers_)
for i in range(len(vil_e)):
vil[vil_e[i][0], :] = vil_e[i][1]
# compute the final mask as the intersection of the masks of the advected
# fields
mask = np.isfinite(vil[0, :])
for i in range(1, vil.shape[0]):
mask = np.logical_and(mask, np.isfinite(vil[i, :]))
if rainrate is None and apply_rainrate_mask:
rainrate_mask = np.logical_and(rainrate_mask, mask)
# apply cascade decomposition to the advected input fields
bp_filter_method = cascade.get_method("gaussian")
bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs)
fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers)
decomp_method, recomp_method = cascade.get_method("fft")
vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n))
for i in range(vil.shape[0]):
vil_ = vil[i, :].copy()
vil_[~np.isfinite(vil_)] = 0.0
vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft)
for j in range(n_cascade_levels):
vil_dec[j, i, :] = vil_dec_i["cascade_levels"][j, :]
# compute time-lagged correlation coefficients for the cascade levels of
# the advected and differenced input fields
gamma = np.empty((n_cascade_levels, ar_order, m, n))
for i in range(n_cascade_levels):
vil_diff = np.diff(vil_dec[i, :], axis=0)
vil_diff[~np.isfinite(vil_diff)] = 0.0
for j in range(ar_order):
gamma[i, j, :] = _moving_window_corrcoef(
vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius
)
if ar_order == 2:
# if the order of the ARI model is 2, adjust the correlation coefficients
# so that the resulting process is stationary
for i in range(n_cascade_levels):
gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2(
gamma[i, 0, :], gamma[i, 1, :]
)
# estimate the parameters of the ARI models
phi = []
for i in range(n_cascade_levels):
if ar_order > 2:
phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1)
elif ar_order == 2:
phi_ = _estimate_ar2_params(gamma[i, :])
else:
phi_ = _estimate_ar1_params(gamma[i, :])
phi.append(phi_)
vil_dec = vil_dec[:, -(ar_order + 1) :, :]
if measure_time:
init_time = time.time() - starttime_init
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
r_f = []
if isinstance(timesteps, int):
timesteps = range(timesteps + 1)
timestep_type = "int"
else:
original_timesteps = [0] + list(timesteps)
timesteps = nowcast_utils.binned_timesteps(original_timesteps)
timestep_type = "list"
if rainrate is not None:
r_f_prev = r_vil_a * vil[-1, :] + r_vil_b
else:
r_f_prev = vil[-1, :]
extrap_kwargs["return_displacement"] = True
dp = None
t_nowcast = 0
t_prev = 0.0
for t in range(len(timesteps)):
if timestep_type == "list":
subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]]
else:
subtimesteps = [t]
if len(subtimesteps) > 1 or t > 0:
nowcast_time_step = True
else:
nowcast_time_step = False
if nowcast_time_step:
print(
"Computing nowcast for time step %d... " % (t_nowcast + 1),
end="",
flush=True,
)
t_nowcast += 1
if measure_time:
starttime = time.time()
# iterate the ARI models for each cascade level
for i in range(n_cascade_levels):
vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i])
# recompose the cascade to obtain the forecast field
vil_dec_dict = {}
vil_dec_dict["cascade_levels"] = vil_dec[:, -1, :]
vil_dec_dict["domain"] = "spatial"
vil_dec_dict["normalized"] = False
vil_f = recomp_method(vil_dec_dict)
vil_f[~mask] = np.nan
if rainrate is not None:
# convert VIL to rain rate
r_f_new = r_vil_a * vil_f + r_vil_b
else:
r_f_new = vil_f
if apply_rainrate_mask:
r_f_new[rainrate_mask] = 0.0
r_f_new[r_f_new < 0.0] = 0.0
# advect the recomposed field to obtain the forecast for the current
# time step (or subtimesteps if non-integer time steps are given)
for t_sub in subtimesteps:
if t_sub > 0:
t_diff_prev_int = t_sub - int(t_sub)
if t_diff_prev_int > 0.0:
r_f_ip = (
1.0 - t_diff_prev_int
) * r_f_prev + t_diff_prev_int * r_f_new
else:
r_f_ip = r_f_prev
t_diff_prev = t_sub - t_prev
extrap_kwargs["displacement_prev"] = dp
r_f_ep, dp = extrapolator(
r_f_ip,
velocity,
[t_diff_prev],
allow_nonfinite_values=True,
**extrap_kwargs,
)
r_f.append(r_f_ep[0])
t_prev = t_sub
# advect the forecast field by one time step if no subtimesteps in the
# current interval were found
if len(subtimesteps) == 0:
t_diff_prev = t + 1 - t_prev
extrap_kwargs["displacement_prev"] = dp
_, dp = extrapolator(
None,
velocity,
[t_diff_prev],
allow_nonfinite_values=True,
**extrap_kwargs,
)
t_prev = t + 1
r_f_prev = r_f_new
if nowcast_time_step:
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print("done.")
if measure_time:
mainloop_time = time.time() - starttime_mainloop
if measure_time:
return np.stack(r_f), init_time, mainloop_time
else:
return np.stack(r_f)
| def forecast(
vil,
velocity,
timesteps,
rainrate=None,
n_cascade_levels=8,
extrap_method="semilagrangian",
ar_order=2,
ar_window_radius=50,
r_vil_window_radius=3,
fft_method="numpy",
apply_rainrate_mask=True,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
measure_time=False,
):
"""Generate a nowcast by using the autoregressive nowcasting using VIL
(ANVIL) method. ANVIL is built on top of an extrapolation-based nowcast.
The key features are:
1) Growth and decay: implemented by using a cascade decomposition and
a multiscale autoregressive integrated ARI(p,1) model. Instead of the
original time series, the ARI model is applied to the differenced one
corresponding to time derivatives.
2) Originally designed for using integrated liquid (VIL) as the input data.
In this case, the rain rate (R) is obtained from VIL via an empirical
relation. This implementation is more general so that the input can be
any two-dimensional precipitation field.
3) The parameters of the ARI model and the R(VIL) relation are allowed to
be spatially variable. The estimation is done using a moving window.
Parameters
----------
vil: array_like
Array of shape (ar_order+2,m,n) containing the input fields ordered by
timestamp from oldest to newest. The inputs are expected to contain VIL
or rain rate. The time steps between the inputs are assumed to be regular.
velocity: array_like
Array of shape (2,m,n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs. All values are required to be finite.
timesteps: int or list
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
rainrate: array_like
Array of shape (m,n) containing the most recently observed rain rate
field. If set to None, no R(VIL) conversion is done and the outputs
are in the same units as the inputs.
n_cascade_levels: int, optional
The number of cascade levels to use.
extrap_method: str, optional
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
ar_order: int, optional
The order of the autoregressive model to use. The recommended values
are 1 or 2. Using a higher-order model is strongly discouraged because
the stationarity of the AR process cannot be guaranteed.
ar_window_radius: int, optional
The radius of the window to use for determining the parameters of the
autoregressive model. Set to None to disable localization.
r_vil_window_radius: int, optional
The radius of the window to use for determining the R(VIL) relation.
Applicable if rainrate is not None.
fft_method: str, optional
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
apply_rainrate_mask: bool
Apply mask to prevent producing precipitation to areas where it was not
originally observed. Defaults to True. Disabling this may improve some
verification metrics but increases the number of false alarms. Applicable
if rainrate is None.
num_workers: int, optional
The number of workers to use for parallel computation. Applicable if
dask is installed or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting
the environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
extrap_kwargs: dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict, optional
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
measure_time: bool, optional
If True, measure, print and return the computation time.
Returns
-------
out: ndarray
A three-dimensional array of shape (num_timesteps,m,n) containing a time
series of forecast precipitation fields. The time series starts from
t0+timestep, where timestep is taken from the input VIL/rain rate
fields. If measure_time is True, the return value is a three-element
tuple containing the nowcast array, the initialization time of the
nowcast generator and the time used in the main loop (seconds).
References
----------
:cite:`PCLH2020`
"""
_check_inputs(vil, rainrate, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
print("Computing ANVIL nowcast:")
print("------------------------")
print("")
print("Inputs:")
print("-------")
print("input dimensions: %dx%d" % (vil.shape[1], vil.shape[2]))
print("")
print("Methods:")
print("--------")
print("extrapolation: %s" % extrap_method)
print("FFT: %s" % fft_method)
print("")
print("Parameters:")
print("-----------")
if isinstance(timesteps, int):
print("number of time steps: %d" % timesteps)
else:
print("time steps: %s" % timesteps)
print("parallel threads: %d" % num_workers)
print("number of cascade levels: %d" % n_cascade_levels)
print("order of the ARI(p,1) model: %d" % ar_order)
if type(ar_window_radius) == int:
print("ARI(p,1) window radius: %d" % ar_window_radius)
else:
print("ARI(p,1) window radius: none")
print("R(VIL) window radius: %d" % r_vil_window_radius)
if measure_time:
starttime_init = time.time()
m, n = vil.shape[1:]
vil = vil.copy()
if rainrate is None and apply_rainrate_mask:
rainrate_mask = vil[-1, :] < 0.1
if rainrate is not None:
# determine the coefficients fields of the relation R=a*VIL+b by
# localized linear regression
r_vil_a, r_vil_b = _r_vil_regression(vil[-1, :], rainrate, r_vil_window_radius)
# transform the input fields to Lagrangian coordinates by extrapolation
extrapolator = extrapolation.get_method(extrap_method)
res = list()
def worker(vil, i):
return (
i,
extrapolator(
vil[i, :],
velocity,
vil.shape[0] - 1 - i,
allow_nonfinite_values=True,
**extrap_kwargs,
)[-1],
)
for i in range(vil.shape[0] - 1):
if not DASK_IMPORTED or num_workers == 1:
vil[i, :, :] = worker(vil, i)[1]
else:
res.append(dask.delayed(worker)(vil, i))
if DASK_IMPORTED and num_workers > 1:
num_workers_ = len(res) if num_workers > len(res) else num_workers
vil_e = dask.compute(*res, num_workers=num_workers_)
for i in range(len(vil_e)):
vil[vil_e[i][0], :] = vil_e[i][1]
# compute the final mask as the intersection of the masks of the advected
# fields
mask = np.isfinite(vil[0, :])
for i in range(1, vil.shape[0]):
mask = np.logical_and(mask, np.isfinite(vil[i, :]))
if rainrate is None and apply_rainrate_mask:
rainrate_mask = np.logical_and(rainrate_mask, mask)
# apply cascade decomposition to the advected input fields
bp_filter_method = cascade.get_method("gaussian")
bp_filter = bp_filter_method((m, n), n_cascade_levels, **filter_kwargs)
fft = utils.get_method(fft_method, shape=vil.shape[1:], n_threads=num_workers)
decomp_method, recomp_method = cascade.get_method("fft")
vil_dec = np.empty((n_cascade_levels, vil.shape[0], m, n))
for i in range(vil.shape[0]):
vil_ = vil[i, :].copy()
vil_[~np.isfinite(vil_)] = 0.0
vil_dec_i = decomp_method(vil_, bp_filter, fft_method=fft)
for j in range(n_cascade_levels):
vil_dec[j, i, :] = vil_dec_i["cascade_levels"][j, :]
# compute time-lagged correlation coefficients for the cascade levels of
# the advected and differenced input fields
gamma = np.empty((n_cascade_levels, ar_order, m, n))
for i in range(n_cascade_levels):
vil_diff = np.diff(vil_dec[i, :], axis=0)
vil_diff[~np.isfinite(vil_diff)] = 0.0
for j in range(ar_order):
gamma[i, j, :] = _moving_window_corrcoef(
vil_diff[-1, :], vil_diff[-(j + 2), :], ar_window_radius
)
if ar_order == 2:
# if the order of the ARI model is 2, adjust the correlation coefficients
# so that the resulting process is stationary
for i in range(n_cascade_levels):
gamma[i, 1, :] = autoregression.adjust_lag2_corrcoef2(
gamma[i, 0, :], gamma[i, 1, :]
)
# estimate the parameters of the ARI models
phi = []
for i in range(n_cascade_levels):
if ar_order > 2:
phi_ = autoregression.estimate_ar_params_yw_localized(gamma[i, :], d=1)
elif ar_order == 2:
phi_ = _estimate_ar2_params(gamma[i, :])
else:
phi_ = _estimate_ar1_params(gamma[i, :])
phi.append(phi_)
vil_dec = vil_dec[:, -(ar_order + 1) :, :]
if measure_time:
init_time = time.time() - starttime_init
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
r_f = []
if isinstance(timesteps, int):
timesteps = range(timesteps + 1)
timestep_type = "int"
else:
original_timesteps = [0] + list(timesteps)
timesteps = nowcast_utils.binned_timesteps(original_timesteps)
timestep_type = "list"
if rainrate is not None:
r_f_prev = r_vil_a * vil[-1, :] + r_vil_b
else:
r_f_prev = vil[-1, :]
extrap_kwargs["return_displacement"] = True
dp = None
t_nowcast = 0
t_prev = 0.0
for t in range(len(timesteps)):
if timestep_type == "list":
subtimesteps = [original_timesteps[t_] for t_ in timestep]
else:
subtimesteps = [t]
if len(subtimesteps) > 1 or t > 0:
nowcast_time_step = True
else:
nowcast_time_step = False
if nowcast_time_step:
print(
"Computing nowcast for time step %d... " % (t_nowcast + 1),
end="",
flush=True,
)
t_nowcast += 1
if measure_time:
starttime = time.time()
# iterate the ARI models for each cascade level
for i in range(n_cascade_levels):
vil_dec[i, :] = autoregression.iterate_ar_model(vil_dec[i, :], phi[i])
# recompose the cascade to obtain the forecast field
vil_dec_dict = {}
vil_dec_dict["cascade_levels"] = vil_dec[:, -1, :]
vil_dec_dict["domain"] = "spatial"
vil_dec_dict["normalized"] = False
vil_f = recomp_method(vil_dec_dict)
vil_f[~mask] = np.nan
if rainrate is not None:
# convert VIL to rain rate
r_f_new = r_vil_a * vil_f + r_vil_b
else:
r_f_new = vil_f
if apply_rainrate_mask:
r_f_new[rainrate_mask] = 0.0
r_f_new[r_f_new < 0.0] = 0.0
# advect the recomposed field to obtain the forecast for the current
# time step (or subtimesteps if non-integer time steps are given)
for t_sub in subtimesteps:
if t_sub > 0:
t_diff_prev_int = t_sub - int(t_sub)
if t_diff_prev_int > 0.0:
r_f_ip = (
1.0 - t_diff_prev_int
) * r_f_prev + t_diff_prev_int * r_f_new
else:
r_f_ip = r_f_prev
t_diff_prev = t_sub - t_prev
extrap_kwargs["displacement_prev"] = dp
r_f_ep, dp = extrapolator(
r_f_ip,
velocity,
[t_diff_prev],
allow_nonfinite_values=True,
**extrap_kwargs,
)
r_f.append(r_f_ep[0])
t_prev = t_sub
# advect the forecast field by one time step if no subtimesteps in the
# current interval were found
if len(subtimesteps) == 0:
t_diff_prev = t + 1 - t_prev
extrap_kwargs["displacement_prev"] = dp
_, dp = extrapolator(
None,
velocity,
[t_diff_prev],
allow_nonfinite_values=True,
**extrap_kwargs,
)
t_prev = t + 1
r_f_prev = r_f_new
if nowcast_time_step:
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print("done.")
if measure_time:
mainloop_time = time.time() - starttime_mainloop
if measure_time:
return np.stack(r_f), init_time, mainloop_time
else:
return np.stack(r_f)
|
4,127 | def approx_pi(n: cython.int=10000000):
val: cython.double = 0.
k: cython.int
for k in range(1, n + 1):
val += recip_square(k)
return (6 * val) ** .5
| def approx_pi(n: cython.int = 10000000):
val: cython.double = 0.
k: cython.int
for k in range(1, n + 1):
val += recip_square(k)
return (6 * val) ** .5
|
2,423 | def test_calibration_display_pos_label(pyplot, iris_data_binary):
"""Check the behaviour of `pos_label` in the `CalibrationDisplay`."""
X, y = iris_data_binary
lr = LogisticRegression().fit(X, y)
pos_label = 0
viz = CalibrationDisplay.from_estimator(lr, X, y, pos_label=pos_label)
y_prob = lr.predict_proba(X)[:, pos_label]
prob_true, prob_pred = calibration_curve(y, y_prob, pos_label=pos_label)
assert_allclose(viz.prob_true, prob_true)
assert_allclose(viz.prob_pred, prob_pred)
assert_allclose(viz.y_prob, y_prob)
assert (
viz.ax_.get_xlabel()
== f"Mean predicted probability (Positive label: {pos_label})"
)
assert (
viz.ax_.get_ylabel() == f"Fraction of positives (Positive label: {pos_label})"
)
assert viz.line_.get_label() == "LogisticRegression"
| def test_calibration_display_pos_label(pyplot, iris_data_binary):
"""Check the behaviour of `pos_label` in the `CalibrationDisplay`."""
X, y = iris_data_binary
lr = LogisticRegression().fit(X, y)
pos_label = 0
viz = CalibrationDisplay.from_estimator(lr, X, y, pos_label=pos_label)
y_prob = lr.predict_proba(X)[:, pos_label]
prob_true, prob_pred = calibration_curve(y, y_prob, pos_label=pos_label)
assert_allclose(viz.prob_true, prob_true)
assert_allclose(viz.prob_pred, prob_pred)
assert_allclose(viz.y_prob, y_prob)
assert (
viz.ax_.get_xlabel()
== f"Mean predicted probability (Positive class: {pos_label})"
)
assert (
viz.ax_.get_ylabel() == f"Fraction of positives (Positive label: {pos_label})"
)
assert viz.line_.get_label() == "LogisticRegression"
|
5,297 | def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
# Here we are checking for unsupported FB FT Modes
if m.loss != 1 and m.loss != 2:
raise ValueError("Loss paramter value can be either 1 (for Hierarchical Softmax) or 2 (for Negative Sampling)")
elif m.model != 1 and m.model != 2:
raise ValueError(
"Model parameter value can be either 1 (for Continous Bag of Words model) or 2 (for Skip-gram model)")
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
| def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
# Here we are checking for unsupported FB FT Modes
if m.loss != 1 and m.loss != 2:
raise ValueError("Loss paramter value can be either 1 (for Hierarchical Softmax) or 2 (for Negative Sampling)")
elif m.model not in (1, 2):
raise ValueError(
"Model parameter value can be either 1 (for Continous Bag of Words model) or 2 (for Skip-gram model)")
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
|
19,653 | def create_table(mapping, metadata):
"""Given a mapping data structure (from mapping.yml) and SQLAlchemy
metadata, create a table matching the mapping.
Mapping should be a dict-like with keys "fields", "table" and
optionally "oid_as_pk" and "recored_type" """
fields = []
_handle_primary_key(mapping, fields)
# make a field list to create
for field in fields_for_mapping(mapping):
if mapping["oid_as_pk"] and field["sf"] == "Id":
continue
fields.append(Column(field["db"], Unicode(255)))
if "record_type" in mapping:
fields.append(Column("record_type", Unicode(255)))
t = Table(mapping["table"], metadata, *fields)
if t.exists():
raise BulkDataException("Table already exists: {}".format(mapping["table"]))
return t
| def create_table(mapping, metadata):
"""Given a mapping data structure (from mapping.yml) and SQLAlchemy
metadata, create a table matching the mapping.
Mapping should be a dict-like with keys "fields", "table" and
optionally "oid_as_pk" and "record_type" """
fields = []
_handle_primary_key(mapping, fields)
# make a field list to create
for field in fields_for_mapping(mapping):
if mapping["oid_as_pk"] and field["sf"] == "Id":
continue
fields.append(Column(field["db"], Unicode(255)))
if "record_type" in mapping:
fields.append(Column("record_type", Unicode(255)))
t = Table(mapping["table"], metadata, *fields)
if t.exists():
raise BulkDataException("Table already exists: {}".format(mapping["table"]))
return t
|
43,877 | def cache_execute(fn, cache, pass_kwargs=False, return_tuple=True):
"""Decorator that adds caching to a function that executes
multiple tapes on a device.
This decorator makes use of :attr:`.QuantumTape.hash` to identify
unique tapes.
- If a tape does not match a hash in the cache, then the tape
has not been previously executed. It is executed, and the result
added to the cache.
- If a tape matches a hash in the cache, then the tape has been previously
executed. The corresponding cached result is
extracted, and the tape is not passed to the execution function.
- Finally, there might be the case where one or more tapes in the current
set of tapes to be executed share a hash. If this is the case, duplicated
are removed, to avoid redundant evaluations.
Args:
fn (callable): The execution function to add caching to.
This function should have the signature ``fn(tapes, **kwargs)``,
and it should return ``list[tensor_like]``, with the
same length as the input ``tapes``.
cache (None or dict or Cache): The cache to use. If ``None``,
caching will not occur.
pass_kwargs (bool): If ``False``, keyword arguments passed to the
wrapped function will be passed directly to ``fn``. If ``True``,
they will be ignored.
return_tuple (bool): If ``True``, the output of ``fn`` is returned
as a tuple ``(fn_ouput, [])``, to match the output of execution functions
that also return gradients.
Returns:
function: a wrapped version of the execution function ``fn`` with caching
support
"""
@wraps(fn)
def wrapper(tapes, **kwargs):
if not pass_kwargs:
kwargs = {}
if cache is None or (isinstance(cache, bool) and not cache):
# No caching. Simply execution the execution function
# and return the results.
if not return_tuple:
return fn(tapes, **kwargs)
return fn(tapes, **kwargs), []
execution_tapes = OrderedDict()
cached_results = {}
hashes = {}
repeated = {}
for i, tape in enumerate(tapes):
h = tape.hash
if h in hashes.values():
# Tape already exists within ``tapes``. Determine the
# index of the first occurance of the tape, store this,
# and continue to the next iteration.
idx = list(hashes.keys())[list(hashes.values()).index(h)]
repeated[i] = idx
continue
hashes[i] = h
if hashes[i] in cache:
# Tape exists within the cache, store the cached result
cached_results[i] = cache[hashes[i]]
else:
# Tape does not exist within the cache, store the tape
# for execution via the execution function.
execution_tapes[i] = tape
# execute all unique tapes that do not exist in the cache
res = fn(execution_tapes.values(), **kwargs)
final_res = []
for i, tape in enumerate(tapes):
if i in cached_results:
# insert cached results into the results vector
final_res.append(cached_results[i])
elif i in repeated:
# insert repeated results into the results vector
final_res.append(final_res[repeated[i]])
else:
# insert evaluated results into the results vector
r = res.pop(0)
final_res.append(r)
cache[hashes[i]] = r
if not return_tuple:
return final_res
return final_res, []
wrapper.fn = fn
return wrapper
| def cache_execute(fn, cache, pass_kwargs=False, return_tuple=True):
"""Decorator that adds caching to a function that executes
multiple tapes on a device.
This decorator makes use of :attr:`.QuantumTape.hash` to identify
unique tapes.
- If a tape does not match a hash in the cache, then the tape
# No caching. Simply execute the execution function
added to the cache.
- If a tape matches a hash in the cache, then the tape has been previously
executed. The corresponding cached result is
extracted, and the tape is not passed to the execution function.
- Finally, there might be the case where one or more tapes in the current
set of tapes to be executed share a hash. If this is the case, duplicated
are removed, to avoid redundant evaluations.
Args:
fn (callable): The execution function to add caching to.
This function should have the signature ``fn(tapes, **kwargs)``,
and it should return ``list[tensor_like]``, with the
same length as the input ``tapes``.
cache (None or dict or Cache): The cache to use. If ``None``,
caching will not occur.
pass_kwargs (bool): If ``False``, keyword arguments passed to the
wrapped function will be passed directly to ``fn``. If ``True``,
they will be ignored.
return_tuple (bool): If ``True``, the output of ``fn`` is returned
as a tuple ``(fn_ouput, [])``, to match the output of execution functions
that also return gradients.
Returns:
function: a wrapped version of the execution function ``fn`` with caching
support
"""
@wraps(fn)
def wrapper(tapes, **kwargs):
if not pass_kwargs:
kwargs = {}
if cache is None or (isinstance(cache, bool) and not cache):
# No caching. Simply execution the execution function
# and return the results.
if not return_tuple:
return fn(tapes, **kwargs)
return fn(tapes, **kwargs), []
execution_tapes = OrderedDict()
cached_results = {}
hashes = {}
repeated = {}
for i, tape in enumerate(tapes):
h = tape.hash
if h in hashes.values():
# Tape already exists within ``tapes``. Determine the
# index of the first occurance of the tape, store this,
# and continue to the next iteration.
idx = list(hashes.keys())[list(hashes.values()).index(h)]
repeated[i] = idx
continue
hashes[i] = h
if hashes[i] in cache:
# Tape exists within the cache, store the cached result
cached_results[i] = cache[hashes[i]]
else:
# Tape does not exist within the cache, store the tape
# for execution via the execution function.
execution_tapes[i] = tape
# execute all unique tapes that do not exist in the cache
res = fn(execution_tapes.values(), **kwargs)
final_res = []
for i, tape in enumerate(tapes):
if i in cached_results:
# insert cached results into the results vector
final_res.append(cached_results[i])
elif i in repeated:
# insert repeated results into the results vector
final_res.append(final_res[repeated[i]])
else:
# insert evaluated results into the results vector
r = res.pop(0)
final_res.append(r)
cache[hashes[i]] = r
if not return_tuple:
return final_res
return final_res, []
wrapper.fn = fn
return wrapper
|
54,731 | def _data_checks_pivot_longer(
df, index, column_names, names_sep, names_pattern, names_to, values_to
):
"""
This function raises errors or warnings if the arguments have the wrong
python type, or if an unneeded argument is provided. This function is
executed before proceeding to the computation phase.
"""
if any(
(
isinstance(df.index, pd.MultiIndex),
isinstance(df.columns, pd.MultiIndex),
),
):
warnings.warn(
"""pivot_longer is designed for single index dataframes and
may produce unexpected results for multiIndex dataframes;
for such cases, kindly use pandas.melt."""
)
if index is not None:
if isinstance(index, str):
index = [index]
check("index", index, [list, tuple, Pattern])
if column_names is not None:
if isinstance(column_names, str):
column_names = [column_names]
check("column_names", column_names, [list, tuple, Pattern])
if names_to is not None:
check("names_to", names_to, [list, tuple, str])
if isinstance(names_to, (list, tuple)):
if not all(isinstance(word, str) for word in names_to):
raise TypeError(
"All entries in `names_to` argument must be strings."
)
if len(names_to) > 1:
if all((names_pattern is not None, names_sep is not None)):
raise ValueError(
"""Only one of names_pattern or names_sep
should be provided."""
)
if isinstance(names_to, str) or (len(names_to) == 1):
# names_sep creates more than one column
# whereas regex with names_pattern can be limited to one column
if names_sep is not None:
raise ValueError(
"""
For a single names_to value,
names_sep is not required.
"""
)
if names_pattern is not None:
check("names_pattern", names_pattern, [str, Pattern])
if names_sep is not None:
check("names_sep", names_sep, [str, Pattern])
check("values_to", values_to, [str])
return (
df,
index,
column_names,
names_sep,
names_pattern,
names_to,
values_to,
)
| def _data_checks_pivot_longer(
df, index, column_names, names_sep, names_pattern, names_to, values_to
):
"""
This function raises errors or warnings if the arguments have the wrong
python type, or if an unneeded argument is provided.
This function is executed before proceeding to the computation phase.
Type annotations are not provided because this function is where type checking happens.
"""
if any(
(
isinstance(df.index, pd.MultiIndex),
isinstance(df.columns, pd.MultiIndex),
),
):
warnings.warn(
"""pivot_longer is designed for single index dataframes and
may produce unexpected results for multiIndex dataframes;
for such cases, kindly use pandas.melt."""
)
if index is not None:
if isinstance(index, str):
index = [index]
check("index", index, [list, tuple, Pattern])
if column_names is not None:
if isinstance(column_names, str):
column_names = [column_names]
check("column_names", column_names, [list, tuple, Pattern])
if names_to is not None:
check("names_to", names_to, [list, tuple, str])
if isinstance(names_to, (list, tuple)):
if not all(isinstance(word, str) for word in names_to):
raise TypeError(
"All entries in `names_to` argument must be strings."
)
if len(names_to) > 1:
if all((names_pattern is not None, names_sep is not None)):
raise ValueError(
"""Only one of names_pattern or names_sep
should be provided."""
)
if isinstance(names_to, str) or (len(names_to) == 1):
# names_sep creates more than one column
# whereas regex with names_pattern can be limited to one column
if names_sep is not None:
raise ValueError(
"""
For a single names_to value,
names_sep is not required.
"""
)
if names_pattern is not None:
check("names_pattern", names_pattern, [str, Pattern])
if names_sep is not None:
check("names_sep", names_sep, [str, Pattern])
check("values_to", values_to, [str])
return (
df,
index,
column_names,
names_sep,
names_pattern,
names_to,
values_to,
)
|
39,603 | def resolve_ptr(
near_endpoint: s_obj.Object,
pointer_name: str,
*,
upcoming_intersections: Sequence[s_types.Type] = (),
far_endpoints: Iterable[s_obj.Object] = (),
direction: s_pointers.PointerDirection = (
s_pointers.PointerDirection.Outbound
),
source_context: Optional[parsing.ParserContext] = None,
track_ref: Optional[Union[qlast.Base, Literal[False]]],
ctx: context.ContextLevel,
) -> s_pointers.Pointer:
if not isinstance(near_endpoint, s_sources.Source):
# Reference to a property on non-object
msg = 'invalid property reference on a primitive type expression'
raise errors.InvalidReferenceError(msg, context=source_context)
ptr: Optional[s_pointers.Pointer] = None
if direction is s_pointers.PointerDirection.Outbound:
ptr = near_endpoint.maybe_get_ptr(
ctx.env.schema,
s_name.UnqualName(pointer_name),
)
if ptr is not None:
ref = ptr.get_nearest_non_derived_parent(ctx.env.schema)
if track_ref is not False:
ctx.env.add_schema_ref(ref, track_ref)
else:
ptrs = near_endpoint.getrptrs(ctx.env.schema, pointer_name,
sources=far_endpoints)
if ptrs:
if track_ref is not False:
# If this reverse pointer access is followed by
# intersections, we filter out any pointers that
# couldn't be picked up by the intersections. This avoids
# creating spurious dependencies when reverse
# links are used in schemas.
dep_ptrs = {
ptr for ptr in ptrs
if (src := ptr.get_source(ctx.env.schema))
and all(
src.issubclass(ctx.env.schema, typ)
or any(
dsrc.issubclass(ctx.env.schema, typ)
for dsrc in src.descendants(ctx.env.schema)
)
for typ in upcoming_intersections
)
}
for p in dep_ptrs:
ctx.env.add_schema_ref(
p.get_nearest_non_derived_parent(ctx.env.schema),
track_ref)
for ptr in ptrs:
if ptr.is_pure_computable(ctx.env.schema):
vname = ptr.get_verbosename(ctx.env.schema,
with_parent=True)
raise errors.InvalidReferenceError(
f'cannot follow backlink {pointer_name!r} because '
f'the {vname} is computable',
context=source_context
)
opaque = not far_endpoints
ctx.env.schema, ptr = s_pointers.get_or_create_union_pointer(
ctx.env.schema,
ptrname=s_name.UnqualName(pointer_name),
source=near_endpoint,
direction=direction,
components=ptrs,
opaque=opaque,
modname=ctx.derived_target_module,
)
if ptr is not None:
return ptr
if isinstance(near_endpoint, s_links.Link):
vname = near_endpoint.get_verbosename(ctx.env.schema, with_parent=True)
msg = f'{vname} has no property {pointer_name!r}'
elif direction == s_pointers.PointerDirection.Outbound:
msg = (f'{near_endpoint.get_verbosename(ctx.env.schema)} '
f'has no link or property {pointer_name!r}')
else:
nep_name = near_endpoint.get_displayname(ctx.env.schema)
path = f'{nep_name}.{direction}{pointer_name}'
msg = f'{path!r} does not resolve to any known path'
err = errors.InvalidReferenceError(msg, context=source_context)
if direction is s_pointers.PointerDirection.Outbound:
near_enpoint_pointers = near_endpoint.get_pointers(ctx.env.schema)
s_utils.enrich_schema_lookup_error(
err,
s_name.UnqualName(pointer_name),
modaliases=ctx.modaliases,
item_type=s_pointers.Pointer,
collection=near_enpoint_pointers.objects(ctx.env.schema),
schema=ctx.env.schema,
)
raise err
| def resolve_ptr(
near_endpoint: s_obj.Object,
pointer_name: str,
*,
upcoming_intersections: Sequence[s_types.Type] = (),
far_endpoints: Iterable[s_obj.Object] = (),
direction: s_pointers.PointerDirection = (
s_pointers.PointerDirection.Outbound
),
source_context: Optional[parsing.ParserContext] = None,
track_ref: Optional[Union[qlast.Base, Literal[False]]],
ctx: context.ContextLevel,
) -> s_pointers.Pointer:
if not isinstance(near_endpoint, s_sources.Source):
# Reference to a property on non-object
msg = 'invalid property reference on a primitive type expression'
raise errors.InvalidReferenceError(msg, context=source_context)
ptr: Optional[s_pointers.Pointer] = None
if direction is s_pointers.PointerDirection.Outbound:
ptr = near_endpoint.maybe_get_ptr(
ctx.env.schema,
s_name.UnqualName(pointer_name),
)
if ptr is not None:
ref = ptr.get_nearest_non_derived_parent(ctx.env.schema)
if track_ref is not False:
ctx.env.add_schema_ref(ref, track_ref)
else:
ptrs = near_endpoint.getrptrs(ctx.env.schema, pointer_name,
sources=far_endpoints)
if ptrs:
if track_ref is not False:
# If this reverse pointer access is followed by
# intersections, we filter out any pointers that
# couldn't be picked up by the intersections. This avoids
# creating spurious dependencies when reverse
# links are used in schemas.
dep_ptrs = {
ptr for ptr in ptrs
if (src := ptr.get_source(ctx.env.schema))
and all(
src.issubclass(ctx.env.schema, typ)
or any(
dsrc.issubclass(ctx.env.schema, typ)
for dsrc in src.descendants(ctx.env.schema)
)
for typ in upcoming_intersections
)
}
for p in dep_ptrs:
ctx.env.add_schema_ref(
p.get_nearest_non_derived_parent(ctx.env.schema),
track_ref)
for ptr in ptrs:
if ptr.is_pure_computable(ctx.env.schema):
vname = ptr.get_verbosename(ctx.env.schema,
with_parent=True)
raise errors.InvalidReferenceError(
f'cannot follow backlink {pointer_name!r} because '
f'{vname} is computable',
context=source_context
)
opaque = not far_endpoints
ctx.env.schema, ptr = s_pointers.get_or_create_union_pointer(
ctx.env.schema,
ptrname=s_name.UnqualName(pointer_name),
source=near_endpoint,
direction=direction,
components=ptrs,
opaque=opaque,
modname=ctx.derived_target_module,
)
if ptr is not None:
return ptr
if isinstance(near_endpoint, s_links.Link):
vname = near_endpoint.get_verbosename(ctx.env.schema, with_parent=True)
msg = f'{vname} has no property {pointer_name!r}'
elif direction == s_pointers.PointerDirection.Outbound:
msg = (f'{near_endpoint.get_verbosename(ctx.env.schema)} '
f'has no link or property {pointer_name!r}')
else:
nep_name = near_endpoint.get_displayname(ctx.env.schema)
path = f'{nep_name}.{direction}{pointer_name}'
msg = f'{path!r} does not resolve to any known path'
err = errors.InvalidReferenceError(msg, context=source_context)
if direction is s_pointers.PointerDirection.Outbound:
near_enpoint_pointers = near_endpoint.get_pointers(ctx.env.schema)
s_utils.enrich_schema_lookup_error(
err,
s_name.UnqualName(pointer_name),
modaliases=ctx.modaliases,
item_type=s_pointers.Pointer,
collection=near_enpoint_pointers.objects(ctx.env.schema),
schema=ctx.env.schema,
)
raise err
|
8,652 | def print_config():
"""Print list of available configuration from default homedir."""
configs = enumerate_configs(DEFAULT_HOMEDIR)
print('Config files in ~/.sopel:')
config = None
for config in configs:
print('\t%s' % config)
if not config:
print('\tNone found')
print('-------------------------')
| def print_config():
"""Print list of available configurations from default homedir."""
configs = enumerate_configs(DEFAULT_HOMEDIR)
print('Config files in ~/.sopel:')
config = None
for config in configs:
print('\t%s' % config)
if not config:
print('\tNone found')
print('-------------------------')
|
57,233 | def plot(data, cats=None, pconfig=None):
"""Plot a horizontal bar graph. Expects a 2D dict of sample
data. Also can take info about categories. There are quite a
few variants of how to use this function, see the docs for details.
:param data: 2D dict, first keys as sample names, then x:y data pairs
Can supply a list of dicts and will have buttons to switch
:param cats: optional list, dict or OrderedDict with plot categories
:param pconfig: optional dict with config key:value pairs
:return: HTML and JS, ready to be inserted into the page
"""
if pconfig is None:
pconfig = {}
# Allow user to overwrite any given config for this plot
if "id" in pconfig and pconfig["id"] and pconfig["id"] in config.custom_plot_config:
for k, v in config.custom_plot_config[pconfig["id"]].items():
pconfig[k] = v
# Validate config if linting
if config.lint:
# Get module name
modname = ""
callstack = inspect.stack()
for n in callstack:
if "multiqc/modules/" in n[1] and "base_module.py" not in n[1]:
callpath = n[1].split("multiqc/modules/", 1)[-1]
modname = ">{}< ".format(callpath)
break
# Look for essential missing pconfig keys
for k in ["id", "title", "ylab"]:
if k not in pconfig:
errmsg = "LINT: {}Bargraph pconfig was missing key '{}'".format(modname, k)
logger.error(errmsg)
report.lint_errors.append(errmsg)
# Check plot title format
if not re.match(r"^[^:]*\S: \S[^:]*$", pconfig.get("title", "")):
errmsg = "LINT: {} Bargraph title did not match format 'Module: Plot Name' (found '{}')".format(
modname, pconfig.get("title", "")
)
logger.error(errmsg)
report.lint_errors.append(errmsg)
# Given one dataset - turn it into a list
if type(data) is not list:
data = [data]
# Make list of cats from different inputs
if cats is None:
cats = list()
elif type(cats) is not list:
cats = [cats]
else:
try: # Py2
if type(cats[0]) is str or type(cats[0]) is unicode:
cats = [cats]
except NameError: # Py3
if type(cats[0]) is str:
cats = [cats]
# Generate default categories if not supplied
for idx in range(len(data)):
try:
cats[idx]
except (IndexError):
cats.append(list())
for s in data[idx].keys():
for k in data[idx][s].keys():
if k not in cats[idx]:
cats[idx].append(k)
# If we have cats in lists, turn them into dicts
for idx, cat in enumerate(cats):
if type(cat) is list:
newcats = OrderedDict()
for c in cat:
newcats[c] = {"name": c}
cats[idx] = newcats
else:
for c in cat:
if "name" not in cat[c]:
cats[idx][c]["name"] = c
# Allow user to overwrite a given category config for this plot
if "id" in pconfig and pconfig["id"] and pconfig["id"] in config.custom_plot_config:
for k, v in config.custom_plot_config[pconfig["id"]].items():
if k in cats[idx].keys():
for kk, vv in v.items():
cats[idx][k][kk] = vv
# Parse the data into a chart friendly format
plotsamples = list()
plotdata = list()
for idx, d in enumerate(data):
if isinstance(d, OrderedDict):
hc_samples = list(d.keys())
else:
hc_samples = sorted(list(d.keys()))
hc_data = list()
sample_dcount = dict()
for c in cats[idx].keys():
thisdata = list()
catcount = 0
for s in hc_samples:
if s not in sample_dcount:
sample_dcount[s] = 0
try:
thisdata.append(float(d[s][c]))
catcount += 1
sample_dcount[s] += 1
except (KeyError, ValueError):
# Pad with NaNs when we have missing categories in a sample
thisdata.append(float("nan"))
if catcount > 0:
if pconfig.get("hide_zero_cats", True) is False or max(x for x in thisdata if not math.isnan(x)) > 0:
thisdict = {"name": cats[idx][c]["name"], "data": thisdata}
if "color" in cats[idx][c]:
thisdict["color"] = cats[idx][c]["color"]
hc_data.append(thisdict)
# Remove empty samples
for s, c in sample_dcount.items():
if c == 0:
idx = hc_samples.index(s)
del hc_samples[idx]
for j, d in enumerate(hc_data):
del hc_data[j]["data"][idx]
if len(hc_data) > 0:
plotsamples.append(hc_samples)
plotdata.append(hc_data)
if len(plotdata) == 0:
logger.warning("Tried to make bar plot, but had no data")
return '<p class="text-danger">Error - was not able to plot data.</p>'
# Make a plot - custom, interactive or flat
try:
return get_template_mod().bargraph(plotdata, plotsamples, pconfig)
except (AttributeError, TypeError):
if config.plots_force_flat or (
not config.plots_force_interactive and len(plotsamples[0]) > config.plots_flat_numseries
):
try:
plot = matplotlib_bargraph(plotdata, plotsamples, pconfig)
report.num_mpl_plots += 1
return plot
except Exception as e:
logger.error("############### Error making MatPlotLib figure! Falling back to HighCharts.")
logger.debug(e, exc_info=True)
return highcharts_bargraph(plotdata, plotsamples, pconfig)
else:
# Use MatPlotLib to generate static plots if requested
if config.export_plots:
matplotlib_bargraph(plotdata, plotsamples, pconfig)
# Return HTML for HighCharts dynamic plot
return highcharts_bargraph(plotdata, plotsamples, pconfig)
| def plot(data, cats=None, pconfig=None):
"""Plot a horizontal bar graph. Expects a 2D dict of sample
data. Also can take info about categories. There are quite a
few variants of how to use this function, see the docs for details.
:param data: 2D dict, first keys as sample names, then x:y data pairs
Can supply a list of dicts and will have buttons to switch
:param cats: optional list, dict or OrderedDict with plot categories
:param pconfig: optional dict with config key:value pairs
:return: HTML and JS, ready to be inserted into the page
"""
if pconfig is None:
pconfig = {}
# Allow user to overwrite any given config for this plot
if "id" in pconfig and pconfig["id"] and pconfig["id"] in config.custom_plot_config:
for k, v in config.custom_plot_config[pconfig["id"]].items():
pconfig[k] = v
# Validate config if linting
if config.lint:
# Get module name
modname = ""
callstack = inspect.stack()
for n in callstack:
if "multiqc/modules/" in n[1] and "base_module.py" not in n[1]:
callpath = n[1].split("multiqc/modules/", 1)[-1]
modname = ">{}< ".format(callpath)
break
# Look for essential missing pconfig keys
for k in ["id", "title", "ylab"]:
if k not in pconfig:
errmsg = "LINT: {}Bargraph pconfig was missing key '{}'".format(modname, k)
logger.error(errmsg)
report.lint_errors.append(errmsg)
# Check plot title format
if not re.match(r"^[^:]*\S: \S[^:]*$", pconfig.get("title", "")):
errmsg = "LINT: {} Bargraph title did not match format 'Module: Plot Name' (found '{}')".format(
modname, pconfig.get("title", "")
)
logger.error(errmsg)
report.lint_errors.append(errmsg)
# Given one dataset - turn it into a list
if type(data) is not list:
data = [data]
# Make list of cats from different inputs
if cats is None:
cats = list()
elif type(cats) is not list:
cats = [cats]
else:
try: # Py2
if type(cats[0]) is str or type(cats[0]) is unicode:
cats = [cats]
except NameError: # Py3
if type(cats[0]) is str:
cats = [cats]
# Generate default categories if not supplied
for idx in range(len(data)):
try:
cats[idx]
except (IndexError):
cats.append(list())
for s in data[idx].keys():
for k in data[idx][s].keys():
if k not in cats[idx]:
cats[idx].append(k)
# If we have cats in lists, turn them into dicts
for idx, cat in enumerate(cats):
if type(cat) is list:
newcats = OrderedDict()
for c in cat:
newcats[c] = {"name": c}
cats[idx] = newcats
else:
for c in cat:
if "name" not in cat[c]:
cats[idx][c]["name"] = c
# Allow user to overwrite a given category config for this plot
if "id" in pconfig and pconfig["id"] and pconfig["id"] in config.custom_plot_config:
for k, v in config.custom_plot_config[pconfig["id"]].items():
if k in cats[idx].keys():
for kk, vv in v.items():
cats[idx][k][kk] = vv
# Parse the data into a chart friendly format
plotsamples = list()
plotdata = list()
for idx, d in enumerate(data):
if isinstance(d, OrderedDict):
hc_samples = list(d.keys())
else:
hc_samples = sorted(list(d.keys()))
hc_data = list()
sample_dcount = dict()
for c in cats[idx].keys():
thisdata = list()
catcount = 0
for s in hc_samples:
if s not in sample_dcount:
sample_dcount[s] = 0
try:
thisdata.append(float(d[s][c]))
catcount += 1
sample_dcount[s] += 1
except (KeyError, ValueError):
# Pad with NaNs when we have missing categories in a sample
thisdata.append(float("nan"))
if catcount > 0:
if pconfig.get("hide_zero_cats", True) is False or max(x for x in thisdata if not math.isnan(x)) > 0:
thisdict = {"name": cats[idx][c]["name"], "data": thisdata}
if "color" in cats[idx][c]:
thisdict["color"] = cats[idx][c]["color"]
hc_data.append(thisdict)
# Remove empty samples
for s, c in sample_dcount.items():
if c == 0:
idx = hc_samples.index(s)
del hc_samples[idx]
for j, d in enumerate(hc_data):
del hc_data[j]["data"][idx]
if len(hc_data) > 0:
plotsamples.append(hc_samples)
plotdata.append(hc_data)
if len(plotdata) == 0:
logger.warning("Tried to make bar plot, but had no data")
return '<p class="text-danger">Error - was not able to plot data.</p>'
# Make a plot - custom, interactive or flat
try:
return get_template_mod().bargraph(plotdata, plotsamples, pconfig)
except (AttributeError, TypeError):
if config.plots_force_flat or (
not config.plots_force_interactive and len(plotsamples[0]) > config.plots_flat_numseries
):
try:
report.num_mpl_plots += 1
return matplotlib_bargraph(plotdata, plotsamples, pconfig)
except Exception as e:
logger.error("############### Error making MatPlotLib figure! Falling back to HighCharts.")
logger.debug(e, exc_info=True)
return highcharts_bargraph(plotdata, plotsamples, pconfig)
else:
# Use MatPlotLib to generate static plots if requested
if config.export_plots:
matplotlib_bargraph(plotdata, plotsamples, pconfig)
# Return HTML for HighCharts dynamic plot
return highcharts_bargraph(plotdata, plotsamples, pconfig)
|
30,986 | def search_pack(client, prints_manager, pack_display_name, pack_id, thread_index, lock):
""" Make a pack search request.
Args:
client (demisto_client): The configured client to use.
prints_manager (ParallelPrintsManager): Print manager object.
pack_display_name (string): The pack display name.
pack_id (string): The pack ID.
thread_index (int): the thread index.
lock (Lock): A lock object.
Returns:
(dict): Returns the pack data if found, or empty dict otherwise.
"""
try:
# make the search request
response_data, status_code, _ = demisto_client.generic_request_func(client,
path=f'/contentpacks/marketplace/{pack_id}',
method='GET',
accept='application/json',
_request_timeout=None)
if 200 <= status_code < 300:
result_object = ast.literal_eval(response_data)
if result_object and result_object.get('currentVersion'):
print_msg = 'Found pack: {} by its ID: {} in bucket!\n'.format(pack_display_name, pack_id)
prints_manager.add_print_job(print_msg, print_color, thread_index, LOG_COLORS.GREEN)
prints_manager.execute_thread_prints(thread_index)
pack_data = {
'id': result_object.get('id'),
'version': result_object.get('currentVersion')
}
return pack_data
else:
print_msg = 'Did not find pack: {} by its ID: {} in bucket.\n'.format(pack_display_name, pack_id)
prints_manager.add_print_job(print_msg, print_color, thread_index, LOG_COLORS.RED)
prints_manager.execute_thread_prints(thread_index)
raise Exception(print_msg)
else:
result_object = ast.literal_eval(response_data)
msg = result_object.get('message', '')
err_msg = 'Search request for pack: {} with ID: {}, failed with status code {}\n{}'.format(
pack_display_name,
pack_id, status_code,
msg)
raise Exception(err_msg)
except Exception as e:
err_msg = 'Search request for pack: {} with ID: {}, failed. Reason:\n{}'.format(pack_display_name, pack_id,
str(e))
prints_manager.add_print_job(err_msg, print_color, thread_index, LOG_COLORS.RED)
lock.acquire()
global SUCCESS_FLAG
SUCCESS_FLAG = False
lock.release()
| def search_pack(client, prints_manager, pack_display_name, pack_id, thread_index, lock):
""" Make a pack search request.
Args:
client (demisto_client): The configured client to use.
prints_manager (ParallelPrintsManager): Print manager object.
pack_display_name (string): The pack display name.
pack_id (string): The pack ID.
thread_index (int): the thread index.
lock (Lock): A lock object.
Returns:
(dict): Returns the pack data if found, or empty dict otherwise.
"""
try:
# make the search request
response_data, status_code, _ = demisto_client.generic_request_func(client,
path=f'/contentpacks/marketplace/{pack_id}',
method='GET',
accept='application/json',
_request_timeout=None)
if 200 <= status_code < 300:
result_object = ast.literal_eval(response_data)
if result_object and result_object.get('currentVersion'):
print_msg = 'Found pack: {} by its ID: {} in bucket!\n'.format(pack_display_name, pack_id)
prints_manager.add_print_job(print_msg, print_color, thread_index, LOG_COLORS.GREEN)
prints_manager.execute_thread_prints(thread_index)
pack_data = {
'id': result_object.get('id'),
'version': result_object.get('currentVersion'),
}
return pack_data
else:
print_msg = 'Did not find pack: {} by its ID: {} in bucket.\n'.format(pack_display_name, pack_id)
prints_manager.add_print_job(print_msg, print_color, thread_index, LOG_COLORS.RED)
prints_manager.execute_thread_prints(thread_index)
raise Exception(print_msg)
else:
result_object = ast.literal_eval(response_data)
msg = result_object.get('message', '')
err_msg = 'Search request for pack: {} with ID: {}, failed with status code {}\n{}'.format(
pack_display_name,
pack_id, status_code,
msg)
raise Exception(err_msg)
except Exception as e:
err_msg = 'Search request for pack: {} with ID: {}, failed. Reason:\n{}'.format(pack_display_name, pack_id,
str(e))
prints_manager.add_print_job(err_msg, print_color, thread_index, LOG_COLORS.RED)
lock.acquire()
global SUCCESS_FLAG
SUCCESS_FLAG = False
lock.release()
|
14,838 | def setup(hass, config):
"""Create the ViCare component."""
conf = config[DOMAIN]
params = {"token_file": tempfile.gettempdir() + "/vicare_token.save"}
if conf.get(CONF_CIRCUIT) is not None:
params["circuit"] = conf[CONF_CIRCUIT]
try:
vicare_api = Device(conf[CONF_USERNAME], conf[CONF_PASSWORD], **params)
except AttributeError:
_LOGGER.error(
"Failed to create PyViCare API client. Please check your credentials."
)
return False
hass.data[DOMAIN] = {}
hass.data[DOMAIN][VICARE_API] = vicare_api
hass.data[DOMAIN][VICARE_NAME] = conf[CONF_NAME]
for platform in VICARE_PLATFORMS:
discovery.load_platform(hass, platform, DOMAIN, {}, config)
return True
| def setup(hass, config):
"""Create the ViCare component."""
conf = config[DOMAIN]
params = {"token_file": str(Path(tempfile.gettempdir()) / "vicare_token.save")}
if conf.get(CONF_CIRCUIT) is not None:
params["circuit"] = conf[CONF_CIRCUIT]
try:
vicare_api = Device(conf[CONF_USERNAME], conf[CONF_PASSWORD], **params)
except AttributeError:
_LOGGER.error(
"Failed to create PyViCare API client. Please check your credentials."
)
return False
hass.data[DOMAIN] = {}
hass.data[DOMAIN][VICARE_API] = vicare_api
hass.data[DOMAIN][VICARE_NAME] = conf[CONF_NAME]
for platform in VICARE_PLATFORMS:
discovery.load_platform(hass, platform, DOMAIN, {}, config)
return True
|
3,035 | def external_error_raised(
expected_exception: Type[Exception],
) -> Callable[[Type[Exception], None], None]:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Exected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
| def external_error_raised(
expected_exception: Type[Exception],
) -> Callable[[Type[Exception], None], None]:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
|
243 | def _generate_wait_repo_command(override: munch.Munch, client: bindings.BodhiClient) \
-> typing.Optional[typing.Tuple[str, str, str, str]]:
"""
Generate and return a koji wait-repo command for the given override, if possible.
Args:
override: A Munch of the Override we want to print a hint about.
client: A BodhiClient that we can use to query the server for Releases.
Returns:
If we know the release for the override's build, we return a tuple suitable for passing to
subprocess.Popen for a koji command that will wait on the repo. If we can't we return None.
"""
# Creating multiple overrides returns a dict object with keys 'overrides' and 'caveat' having a
# length of 2. The response for each override is stored in a list with key 'overrides'
if len(override) == 2:
releases = []
for i in range(len(override["overrides"])):
if 'release_id' in override["overrides"][i]["build"]:
release = client.get_releases(ids=[override["overrides"][i]
["build"]["release_id"]])['releases'][0]
releases.append(('koji', 'wait-repo', f'{release.dist_tag}-build',
f'--build={override["overrides"][i]["build"]["nvr"]}'))
return releases
# Creating a single override only returns a dict object of the information about the override
if 'release_id' in override.build:
release = client.get_releases(ids=[override.build.release_id])['releases'][0]
return ('koji', 'wait-repo', f'{release.dist_tag}-build',
f'--build={override.build.nvr}')
return None
| def _generate_wait_repo_command(override: munch.Munch, client: bindings.BodhiClient) \
-> typing.Optional[typing.Tuple[str, str, str, str]]:
"""
Generate and return a koji wait-repo command for the given override, if possible.
Args:
override: A Munch of the Override we want to print a hint about.
client: A BodhiClient that we can use to query the server for Releases.
Returns:
If we know the release for the override's build, we return a tuple suitable for passing to
subprocess.Popen for a koji command that will wait on the repo. If we can't we return None.
"""
# Creating multiple overrides returns a dict object with keys 'overrides' and 'caveat' having a
# length of 2. The response for each override is stored in a list with key 'overrides'
if len(override) == 2:
releases = []
for i, _ in enumerate(override["overrides"]):
if 'release_id' in override["overrides"][i]["build"]:
release = client.get_releases(ids=[override["overrides"][i]
["build"]["release_id"]])['releases'][0]
releases.append(('koji', 'wait-repo', f'{release.dist_tag}-build',
f'--build={override["overrides"][i]["build"]["nvr"]}'))
return releases
# Creating a single override only returns a dict object of the information about the override
if 'release_id' in override.build:
release = client.get_releases(ids=[override.build.release_id])['releases'][0]
return ('koji', 'wait-repo', f'{release.dist_tag}-build',
f'--build={override.build.nvr}')
return None
|
5,732 | def check_grad(func, grad, x0, *args, **kwargs):
"""Check the correctness of a gradient function by comparing it against a
(forward) finite-difference approximation of the gradient.
Parameters
----------
func : callable ``func(x0, *args)``
Function whose derivative is to be checked.
grad : callable ``grad(x0, *args)``
Gradient of `func`.
x0 : ndarray
Points to check `grad` against forward difference approximation of grad
using `func`.
args : \\*args, optional
Extra arguments passed to `func` and `grad`.
epsilon : float, optional
Step size used for the finite difference approximation. It defaults to
``sqrt(np.finfo(float).eps)``, which is approximately 1.49e-08.
random_projection : bool, optional
If set to ``True``, then gradients along a random vector
are used to check `grad` against forward difference approximation
using `func`. By default it is ``False``.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Specify `seed` for reproducing the return value from this function.
The random numbers generated with this seed affect the random vector
along which gradients are computed to check ``grad``. If you supply `seed`
without setting `random_projection` to ``True`` then a ``ValueError``
will be raised.
Returns
-------
err : float
The square root of the sum of squares (i.e., the 2-norm) of the
difference between ``grad(x0, *args)`` and the finite difference
approximation of `grad` using func at the points `x0`.
See Also
--------
approx_fprime
Examples
--------
>>> def func(x):
... return x[0]**2 - 0.5 * x[1]**3
>>> def grad(x):
... return [2 * x[0], -1.5 * x[1]**2]
>>> from scipy.optimize import check_grad
>>> check_grad(func, grad, [1.5, -1.5])
2.9802322387695312e-08
>>> rng = np.random.default_rng()
>>> check_grad(func, grad, [1.5, -1.5],
... random_projection=True, seed=rng)
2.9802322387695312e-08
"""
x0 = np.asarray(x0)
step = kwargs.pop('epsilon', _epsilon)
random_projection = kwargs.pop('random_projection', False)
if random_projection:
random_state = check_random_state(kwargs.pop('seed', None))
if kwargs:
raise ValueError("Unexpected keyword arguments: %r" %
(list(kwargs.keys()),))
def g(w, *args):
func, x0, v = args[0:3]
return func(x0 + w*v, *args[3:])
if random_projection:
v = random_state.binomial(1, 0.5, x0.shape)
v[v == 0] = -1
_args = (func, x0, v) + args
_func = g
vars = np.zeros((1,))
analytical_grad = np.dot(grad(x0, *args), v)
else:
_args = args
_func = func
vars = x0
analytical_grad = grad(x0, *args)
return sqrt(sum((analytical_grad -
approx_fprime(vars, _func, step, *_args))**2))
| def check_grad(func, grad, x0, *args, **kwargs):
"""Check the correctness of a gradient function by comparing it against a
(forward) finite-difference approximation of the gradient.
Parameters
----------
func : callable ``func(x0, *args)``
Function whose derivative is to be checked.
grad : callable ``grad(x0, *args)``
Gradient of `func`.
x0 : ndarray
Points to check `grad` against forward difference approximation of grad
using `func`.
args : \\*args, optional
Extra arguments passed to `func` and `grad`.
epsilon : float, optional
Step size used for the finite difference approximation. It defaults to
``sqrt(np.finfo(float).eps)``, which is approximately 1.49e-08.
random_projection : bool, optional
If set to ``True``, then gradients along a random vector
are used to check `grad` against forward difference approximation
using `func`. By default it is ``False``.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Specify `seed` for reproducing the return value from this function.
The random numbers generated with this seed affect the random vector
along which gradients are computed to check ``grad``. If you supply `seed`
without setting `random_projection` to ``True`` then a ``ValueError``
will be raised.
Returns
-------
err : float
The square root of the sum of squares (i.e., the 2-norm) of the
difference between ``grad(x0, *args)`` and the finite difference
approximation of `grad` using func at the points `x0`.
See Also
--------
approx_fprime
Examples
--------
>>> def func(x):
... return x[0]**2 - 0.5 * x[1]**3
>>> def grad(x):
... return [2 * x[0], -1.5 * x[1]**2]
>>> from scipy.optimize import check_grad
>>> check_grad(func, grad, [1.5, -1.5])
2.9802322387695312e-08
>>> rng = np.random.default_rng()
>>> check_grad(func, grad, [1.5, -1.5],
... random_projection=True, seed=rng)
2.9802322387695312e-08
"""
x0 = np.asarray(x0)
step = kwargs.pop('epsilon', _epsilon)
random_projection = kwargs.pop('random_projection', False)
if random_projection:
random_state = check_random_state(kwargs.pop('seed', None))
if kwargs:
raise ValueError("Unexpected keyword arguments: %r" %
(list(kwargs.keys()),))
def g(w, *args):
func, x0, v = args[0:3]
return func(x0 + w*v, *args[3:])
if random_projection:
v = rng_integers(random_state, 0, 2, size=x0.shape)
v[v == 0] = -1
_args = (func, x0, v) + args
_func = g
vars = np.zeros((1,))
analytical_grad = np.dot(grad(x0, *args), v)
else:
_args = args
_func = func
vars = x0
analytical_grad = grad(x0, *args)
return sqrt(sum((analytical_grad -
approx_fprime(vars, _func, step, *_args))**2))
|
29,869 | def _validate_architectures(architectures):
"""Expand and validate architecture data.
Validation includes:
- The list cannot be a combination of strings and Architecture objects.
- The same architecture cannot be defined in multiple `build-to` fields,
even if the implicit values are used to define `build-to`.
- Only one architecture can be defined in the `build-to` list.
- The `all` keyword is properly used. (see `_validate_architectures_all_keyword()`)
:raise ValueError: If architecture data is invalid.
"""
# validate strings and Architecture objects are not mixed
if not (
all(isinstance(architecture, str) for architecture in architectures)
or all(isinstance(architecture, Architecture) for architecture in architectures)
):
raise ValueError(
f"Every item must either be a string or an object for {architectures!r}"
)
_expand_architectures(architectures)
# validate `build-to` after expanding data
if any(len(architecture.build_to) > 1 for architecture in architectures):
raise ValueError("multiple architectures are defined for one 'build-to'")
_validate_architectures_all_keyword(architectures)
if len(architectures) > 1:
# validate multiple uses of the same architecture
unique_build_tos = set()
for element in architectures:
for architecture in element.build_to:
if architecture in unique_build_tos:
raise ValueError(
f"multiple items will build snaps that claim to run on {architecture}"
)
unique_build_tos.add(architecture)
return architectures
| def _validate_architectures(architectures):
"""Expand and validate architecture data.
Validation includes:
- The list cannot be a combination of strings and Architecture objects.
- The same architecture cannot be defined in multiple `build-for` fields,
even if the implicit values are used to define `build-to`.
- Only one architecture can be defined in the `build-to` list.
- The `all` keyword is properly used. (see `_validate_architectures_all_keyword()`)
:raise ValueError: If architecture data is invalid.
"""
# validate strings and Architecture objects are not mixed
if not (
all(isinstance(architecture, str) for architecture in architectures)
or all(isinstance(architecture, Architecture) for architecture in architectures)
):
raise ValueError(
f"Every item must either be a string or an object for {architectures!r}"
)
_expand_architectures(architectures)
# validate `build-to` after expanding data
if any(len(architecture.build_to) > 1 for architecture in architectures):
raise ValueError("multiple architectures are defined for one 'build-to'")
_validate_architectures_all_keyword(architectures)
if len(architectures) > 1:
# validate multiple uses of the same architecture
unique_build_tos = set()
for element in architectures:
for architecture in element.build_to:
if architecture in unique_build_tos:
raise ValueError(
f"multiple items will build snaps that claim to run on {architecture}"
)
unique_build_tos.add(architecture)
return architectures
|
45,718 | def forecast(
precip_fields,
advection_field,
timesteps,
feature_method="blob",
feature_kwargs={},
ari_order=1,
kernel_type="anisotropic",
localization_window_radius=None,
errdist_window_radius=None,
acf_window_radius=None,
extrap_method="semilagrangian",
extrap_kwargs={},
add_perturbations=True,
pert_thrs=(0.5, 1.0),
num_ens_members=40,
vel_pert_method="bps",
vel_pert_kwargs=None,
kmperpixel=None,
timestep=None,
seed=None,
num_workers=1,
measure_time=False,
):
"""Generate a deterministic or ensemble nowcast by using the Lagrangian
INtegro-Difference equation model with Autoregression (LINDA) model.
Parameters
----------
precip_fields : array_like
Array of shape (ari_order + 2, m, n) containing the input rain rate
or reflectivity fields (in linear scale) ordered by timestamp from
oldest to newest. The time steps between the inputs are assumed to be
regular.
advection_field : array_like
Array of shape (2, m, n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs.
timesteps : int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
feature_method : {'blob', 'domain' 'shitomasi'}
Feature detection method:
+-------------------+-----------------------------------------------------+
| Method name | Description |
+===================+=====================================================+
| blob | Laplacian of Gaussian (LoG) blob detector |
| | implemented in scikit-image |
+-------------------+-----------------------------------------------------+
| domain | no feature detection, the model is applied over the |
| | whole domain without localization |
+-------------------+-----------------------------------------------------+
| shitomasi | Shi-Tomasi corner detector implemented in OpenCV |
+-------------------+-----------------------------------------------------+
Default: 'blob'
feature_kwargs : dict, optional
Keyword arguments that are passed as **kwargs for the feature detector.
See :py:mod:`pysteps.feature.blob` and :py:mod:`pysteps.feature.shitomasi`.
ari_order : {1, 2}, optional
The order of the ARI(p, 1) model. Default: 1
kernel_type : {"anisotropic", "isotropic"}, optional
The type of the kernel. Default: 'anisotropic'
localization_window_radius : float, optional
The standard deviation of the Gaussian localization window.
Default: 0.2 * min(m, n)
errdist_window_radius : float, optional
The standard deviation of the Gaussian window for estimating the
forecast error distribution. Default: 0.15 * min(m, n)
acf_window_radius : float, optional
The standard deviation of the Gaussian window for estimating the
forecast error ACF. Default: 0.25 * min(m, n)
extrap_method : str, optional
The extrapolation method to use. See the documentation of
:py:mod:`pysteps.extrapolation.interface`. Default: 'semilagrangian'
extrap_kwargs : dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See :py:mod:`pysteps.extrapolation`.
add_perturbations : bool
Set to False to disable perturbations and generate a single
deterministic nowcast. Default: True
pert_thrs : float
Two-element tuple containing the threshold values for estimating the
perturbation parameters (mm/h). Default: (0.5, 1.0)
num_ens_members : int, optional
The number of ensemble members to generate. Default: 40
vel_pert_method: {'bps', None}, optional
Name of the generator to use for perturbing the advection field. See
:py:mod:`pysteps.noise.interface`. Default: 'bps'
vel_pert_kwargs: dict, optional
Optional dictionary containing keyword arguments 'p_par' and 'p_perp'
for the initializer of the velocity perturbator. The choice of the
optimal parameters depends on the domain and the used optical flow
method. For the default values and parameters optimized for different
domains, see :py:func:`pysteps.nowcasts.steps.forecast`.
kmperpixel: float, optional
Spatial resolution of the input data (kilometers/pixel). Required if
vel_pert_method is not None.
timestep: float, optional
Time step of the motion vectors (minutes). Required if vel_pert_method
is not None.
seed : int, optional
Optional seed for the random generators.
num_workers : int, optional
The number of workers to use for parallel computations. Applicable if
dask is installed. When num_workers>1, it is advisable to disable
OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This
avoids slowdown caused by too many simultaneous threads. Default: 1
measure_time: bool, optional
If set to True, measure, print and return the computation time.
Default: False
Returns
-------
out : numpy.ndarray
A four-dimensional array of shape (num_ens_members, num_timesteps, m, n)
containing a time series of forecast precipitation fields for each
ensemble member. If add_perturbations is False, the first dimension is
dropped. The time series starts from t0 + timestep, where timestep is
taken from the input fields.
"""
_check_inputs(precip_fields, advection_field, timesteps, ari_order)
if localization_window_radius is None:
localization_window_radius = 0.2 * np.min(precip_fields.shape[1:])
if add_perturbations:
if errdist_window_radius is None:
errdist_window_radius = 0.15 * min(
precip_fields.shape[1], precip_fields.shape[2]
)
if acf_window_radius is None:
acf_window_radius = 0.25 * min(
precip_fields.shape[1], precip_fields.shape[2]
)
if vel_pert_method is not None:
if kmperpixel is None:
raise ValueError("vel_pert_method is set but kmperpixel is None")
if timestep is None:
raise ValueError("vel_pert_method is set but timestep is None")
if vel_pert_kwargs is None:
vel_pert_kwargs = dict()
print("Computing LINDA nowcast")
print("-----------------------")
print("")
print("Inputs")
print("------")
print(
"dimensions: {}x{}".format(
precip_fields.shape[1], precip_fields.shape[2]
)
)
print("number of time steps: {}".format(precip_fields.shape[0]))
print("")
print("Methods")
print("-------")
if add_perturbations:
print("nowcast type: ensemble")
else:
print("nowcast type: deterministic")
print("feature detector: {}".format(feature_method))
print("extrapolator: {}".format(extrap_method))
print("kernel type: {}".format(kernel_type))
if add_perturbations and vel_pert_method is not None:
print("velocity perturbator: {}".format(vel_pert_method))
print("")
print("Parameters")
print("----------")
if isinstance(timesteps, int):
print("number of time steps: {}".format(timesteps))
else:
# TODO: implement fractional time steps
raise NotImplementedError("fractional time steps not yet implemented")
print("time steps: {}".format(timesteps))
print("ARI model order: {}".format(ari_order))
print("localization window radius: {}".format(localization_window_radius))
if add_perturbations:
print("error dist. window radius: {}".format(errdist_window_radius))
print("error ACF window radius: {}".format(acf_window_radius))
print("ensemble size: {}".format(num_ens_members))
print("parallel workers: {}".format(num_workers))
print("seed: {}".format(seed))
if vel_pert_method == "bps":
vp_par = vel_pert_kwargs.get(
"p_par", noise.motion.get_default_params_bps_par()
)
vp_perp = vel_pert_kwargs.get(
"p_perp", noise.motion.get_default_params_bps_perp()
)
print(
"velocity perturbations, parallel: {:.2f}, {:.2f}, {:.2f}".format(
vp_par[0], vp_par[1], vp_par[2]
)
)
print(
"velocity perturbations, perpendicular: {:.2f}, {:.2f}, {:.2f}".format(
vp_perp[0], vp_perp[1], vp_perp[2]
)
)
vel_pert_kwargs = vel_pert_kwargs.copy()
vel_pert_kwargs["vp_par"] = vp_par
vel_pert_kwargs["vp_perp"] = vp_perp
fct_gen = _linda_init(
precip_fields,
advection_field,
timesteps,
feature_method,
feature_kwargs,
ari_order,
kernel_type,
localization_window_radius,
extrap_method,
extrap_kwargs,
add_perturbations,
vel_pert_method,
vel_pert_kwargs,
kmperpixel,
timestep,
num_workers,
measure_time,
)
if measure_time:
fct_gen, precip_fields_lagr_diff, init_time = fct_gen
else:
fct_gen, precip_fields_lagr_diff = fct_gen
if not add_perturbations:
return _linda_forecast(
precip_fields,
precip_fields_lagr_diff[1:],
timesteps,
fct_gen,
None,
None,
None,
measure_time,
True,
)
else:
print("Estimating forecast errors... ", end="", flush=True)
if measure_time:
starttime = time.time()
precip_fct_det = _linda_forecast(
precip_fields[:-1],
precip_fields_lagr_diff[:-1],
1,
fct_gen,
None,
None,
None,
False,
False,
)
# compute multiplicative forecast errors
err = precip_fct_det[-1] / precip_fields[-1]
# mask small values
mask = np.logical_or(
np.logical_and(
precip_fct_det[-1] >= pert_thrs[1], precip_fields[-1] >= pert_thrs[0]
),
np.logical_and(
precip_fct_det[-1] >= pert_thrs[0], precip_fields[-1] >= pert_thrs[1]
),
)
err[~mask] = np.nan
if measure_time:
print("{:.2f} seconds.".format(time.time() - starttime))
else:
print("done.")
precip_pert_gen = _init_perturbation_generator(
err,
fct_gen,
errdist_window_radius,
acf_window_radius,
localization_window_radius,
measure_time,
num_workers,
)
if vel_pert_method == "bps":
init_vel_noise, generate_vel_noise = noise.get_method("bps")
vp_par = vel_pert_kwargs["vp_par"]
vp_perp = vel_pert_kwargs["vp_perp"]
kwargs = {
"p_par": vp_par,
"p_perp": vp_perp,
}
vel_pert_gen = {
"gen_func": generate_vel_noise,
"init_func": lambda seed: init_vel_noise(
advection_field, 1.0 / kmperpixel, timestep, seed=seed, **kwargs
),
"timestep": timestep,
}
else:
vel_pert_gen = None
def worker(seed):
return _linda_forecast(
precip_fields,
precip_fields_lagr_diff[1:],
timesteps,
fct_gen,
precip_pert_gen,
vel_pert_gen,
seed,
False,
False,
)
precip_fct_ensemble = []
rs = np.random.RandomState(seed)
if DASK_IMPORTED and num_workers > 1:
res = []
for i in range(num_ens_members):
seed = rs.randint(0, high=1e9)
res.append(dask.delayed(worker)(seed))
precip_fct_ensemble = dask.compute(
*res, num_workers=num_workers, scheduler="threads"
)
else:
for i in range(num_ens_members):
seed = rs.randint(0, high=1e9)
precip_fct_ensemble.append(worker(seed))
return np.stack(precip_fct_ensemble)
| def forecast(
precip_fields,
advection_field,
timesteps,
feature_method="blob",
feature_kwargs={},
ari_order=1,
kernel_type="anisotropic",
localization_window_radius=None,
errdist_window_radius=None,
acf_window_radius=None,
extrap_method="semilagrangian",
extrap_kwargs={},
add_perturbations=True,
pert_thrs=(0.5, 1.0),
num_ens_members=40,
vel_pert_method="bps",
vel_pert_kwargs=None,
kmperpixel=None,
timestep=None,
seed=None,
num_workers=1,
measure_time=False,
):
"""Generate a deterministic or ensemble nowcast by using the Lagrangian
INtegro-Difference equation model with Autoregression (LINDA) model.
Parameters
----------
precip_fields : array_like
Array of shape (ari_order + 2, m, n) containing the input rain rate
or reflectivity fields (in linear scale) ordered by timestamp from
oldest to newest. The time steps between the inputs are assumed to be
regular.
advection_field : array_like
Array of shape (2, m, n) containing the x- and y-components of the
advection field. The velocities are assumed to represent one time step
between the inputs.
timesteps : int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
feature_method : {'blob', 'domain' 'shitomasi'}
Feature detection method:
+-------------------+-----------------------------------------------------+
| Method name | Description |
+===================+=====================================================+
| blob | Laplacian of Gaussian (LoG) blob detector |
| | implemented in scikit-image |
+-------------------+-----------------------------------------------------+
| domain | no feature detection, the model is applied over the |
| | whole domain without localization |
+-------------------+-----------------------------------------------------+
| shitomasi | Shi-Tomasi corner detector implemented in OpenCV |
+-------------------+-----------------------------------------------------+
Default: 'blob'
feature_kwargs : dict, optional
Keyword arguments that are passed as **kwargs for the feature detector.
See :py:mod:`pysteps.feature.blob` and :py:mod:`pysteps.feature.shitomasi`.
ari_order : {1, 2}, optional
The order of the ARI(p, 1) model. Default: 1
kernel_type : {"anisotropic", "isotropic"}, optional
The type of the kernel. Default: 'anisotropic'
localization_window_radius : float, optional
The standard deviation of the Gaussian localization window.
Default: 0.2 * min(m, n)
errdist_window_radius : float, optional
The standard deviation of the Gaussian window for estimating the
forecast error distribution. Default: 0.15 * min(m, n)
acf_window_radius : float, optional
The standard deviation of the Gaussian window for estimating the
forecast error ACF. Default: 0.25 * min(m, n)
extrap_method : str, optional
The extrapolation method to use. See the documentation of
:py:mod:`pysteps.extrapolation.interface`. Default: 'semilagrangian'
extrap_kwargs : dict, optional
Optional dictionary containing keyword arguments for the extrapolation
method. See :py:mod:`pysteps.extrapolation`.
add_perturbations : bool
Set to False to disable perturbations and generate a single
deterministic nowcast. Default: True
pert_thrs : float
Two-element tuple containing the threshold values for estimating the
perturbation parameters (mm/h). Default: (0.5, 1.0)
num_ens_members : int, optional
The number of ensemble members to generate. Default: 40
vel_pert_method: {'bps', None}, optional
Name of the generator to use for perturbing the advection field. See
:py:mod:`pysteps.noise.interface`. Default: 'bps'
vel_pert_kwargs: dict, optional
Optional dictionary containing keyword arguments 'p_par' and 'p_perp'
for the initializer of the velocity perturbator. The choice of the
optimal parameters depends on the domain and the used optical flow
method. For the default values and parameters optimized for different
domains, see :py:func:`pysteps.nowcasts.steps.forecast`.
kmperpixel: float, optional
Spatial resolution of the input data (kilometers/pixel). Required if
vel_pert_method is not None.
timestep: float, optional
Time step of the motion vectors (minutes). Required if vel_pert_method
is not None.
seed : int, optional
Optional seed for the random generators.
num_workers : int, optional
The number of workers to use for parallel computations. Applicable if
dask is installed. When num_workers>1, it is advisable to disable
OpenMP by setting the environment variable OMP_NUM_THREADS to 1. This
avoids slowdown caused by too many simultaneous threads. Default: 1
measure_time: bool, optional
If set to True, measure, print and return the computation time.
Default: False
Returns
-------
out : numpy.ndarray
A four-dimensional array of shape (num_ens_members, num_timesteps, m, n)
containing a time series of forecast precipitation fields for each
ensemble member. If add_perturbations is False, the first dimension is
dropped. The time series starts from t0 + timestep, where timestep is
taken from the input fields.
"""
_check_inputs(precip_fields, advection_field, timesteps, ari_order)
if localization_window_radius is None:
localization_window_radius = 0.2 * np.min(precip_fields.shape[1:])
if add_perturbations:
if errdist_window_radius is None:
errdist_window_radius = 0.15 * min(
precip_fields.shape[1], precip_fields.shape[2]
)
if acf_window_radius is None:
acf_window_radius = 0.25 * min(
precip_fields.shape[1], precip_fields.shape[2]
)
if vel_pert_method is not None:
if kmperpixel is None:
raise ValueError("vel_pert_method is set but kmperpixel is None")
if timestep is None:
raise ValueError("vel_pert_method is set but timestep is None")
if vel_pert_kwargs is None:
vel_pert_kwargs = dict()
print("Computing LINDA nowcast")
print("-----------------------")
print("")
print("Inputs")
print("------")
print(
"dimensions: {}x{}".format(
precip_fields.shape[1], precip_fields.shape[2]
)
)
print("number of time steps: {}".format(precip_fields.shape[0]))
print("")
print("Methods")
print("-------")
if add_perturbations:
print("nowcast type: ensemble")
else:
print("nowcast type: deterministic")
print("feature detector: {}".format(feature_method))
print("extrapolator: {}".format(extrap_method))
print("kernel type: {}".format(kernel_type))
if add_perturbations and vel_pert_method is not None:
print("velocity perturbator: {}".format(vel_pert_method))
print("")
print("Parameters")
print("----------")
if isinstance(timesteps, int):
print("number of time steps: {}".format(timesteps))
else:
# TODO: implement fractional time steps
raise NotImplementedError("fractional time steps not yet implemented")
print("time steps: {}".format(timesteps))
print("ARI model order: {}".format(ari_order))
print("localization window radius: {}".format(localization_window_radius))
if add_perturbations:
print("error dist. window radius: {}".format(errdist_window_radius))
print("error ACF window radius: {}".format(acf_window_radius))
print("ensemble size: {}".format(num_ens_members))
print("parallel workers: {}".format(num_workers))
print("seed: {}".format(seed))
if vel_pert_method == "bps":
vp_par = vel_pert_kwargs.get(
"p_par", noise.motion.get_default_params_bps_par()
)
vp_perp = vel_pert_kwargs.get(
"p_perp", noise.motion.get_default_params_bps_perp()
)
print(
"velocity perturbations, parallel: {:.2f}, {:.2f}, {:.2f}".format(
vp_par[0], vp_par[1], vp_par[2]
)
)
print(
"velocity perturbations, perpendicular: {:.2f}, {:.2f}, {:.2f}".format(
vp_perp[0], vp_perp[1], vp_perp[2]
)
)
vel_pert_kwargs = vel_pert_kwargs.copy()
vel_pert_kwargs["vp_par"] = vp_par
vel_pert_kwargs["vp_perp"] = vp_perp
fct_gen = _linda_init(
precip_fields,
advection_field,
timesteps,
feature_method,
feature_kwargs,
ari_order,
kernel_type,
localization_window_radius,
extrap_method,
extrap_kwargs,
add_perturbations,
vel_pert_method,
vel_pert_kwargs,
kmperpixel,
timestep,
num_workers,
measure_time,
)
if measure_time:
fct_gen, precip_fields_lagr_diff, init_time = fct_gen
else:
fct_gen, precip_fields_lagr_diff = fct_gen
if not add_perturbations:
return _linda_forecast(
precip_fields,
precip_fields_lagr_diff[1:],
timesteps,
fct_gen,
None,
None,
None,
measure_time,
True,
)
else:
print("Estimating forecast errors... ", end="", flush=True)
if measure_time:
starttime = time.time()
precip_fct_det = _linda_forecast(
precip_fields[:-1],
precip_fields_lagr_diff[:-1],
1,
fct_gen,
None,
None,
None,
False,
False,
)
# compute multiplicative forecast errors
err = precip_fct_det[-1] / precip_fields[-1]
# mask small values
mask = np.logical_or(
np.logical_and(
precip_fct_det[-1] >= pert_thrs[1], precip_fields[-1] >= pert_thrs[0]
),
np.logical_and(
precip_fct_det[-1] >= pert_thrs[0], precip_fields[-1] >= pert_thrs[1]
),
)
err[~mask] = np.nan
if measure_time:
print("{:.2f} seconds.".format(time.time() - starttime))
else:
print("done.")
precip_pert_gen = _init_perturbation_generator(
err,
fct_gen,
errdist_window_radius,
acf_window_radius,
localization_window_radius,
measure_time,
num_workers,
)
if vel_pert_method == "bps":
init_vel_noise, generate_vel_noise = noise.get_method("bps")
vp_par = vel_pert_kwargs["vp_par"]
vp_perp = vel_pert_kwargs["vp_perp"]
kwargs = {
"p_par": vp_par,
"p_perp": vp_perp,
}
vel_pert_gen = {
"gen_func": generate_vel_noise,
"init_func": lambda seed: init_vel_noise(
advection_field, 1.0 / kmperpixel, timestep, seed=seed, **kwargs
),
"timestep": timestep,
}
else:
vel_pert_gen = None
def worker(seed):
return _linda_forecast(
precip_fields,
precip_fields_lagr_diff[1:],
timesteps,
fct_gen,
precip_pert_gen,
vel_pert_gen,
seed,
False,
False,
)
precip_fct_ensemble = []
rs = np.random.RandomState(seed)
if DASK_IMPORTED and num_workers > 1:
res = []
for _ in range(num_ens_members):
seed = rs.randint(0, high=1e9)
res.append(dask.delayed(worker)(seed))
precip_fct_ensemble = dask.compute(
*res, num_workers=num_workers, scheduler="threads"
)
else:
for i in range(num_ens_members):
seed = rs.randint(0, high=1e9)
precip_fct_ensemble.append(worker(seed))
return np.stack(precip_fct_ensemble)
|
53,864 | def _validate_trusted_launch(namespace):
if not namespace.security_type:
return
if namespace.security_type.lower() == 'trustedlaunch' and \
(namespace.enable_vtpm is not True or namespace.enable_secure_boot is not True):
logger.warning('Please set --enable-secure-boot to True and --enable-vtpm to True in order to receive the full'
' suite of security features that comes with Trusted Launch.')
| def _validate_trusted_launch(namespace):
if not namespace.security_type or namespace.security_type.lower() != 'trustedlaunch':
return
if not namespace.enable_vtpm or not namespace.enable_secure_boot:
logger.warning('Please set --enable-secure-boot to True and --enable-vtpm to True in order to receive the full'
' suite of security features that comes with Trusted Launch.')
|
18,264 | def test_pkg_install_log_no_log(install_mockery, monkeypatch):
"""Test the installer log function with no log file."""
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
# Attempt installing log without the build log file
with pytest.raises(IOError, match="No such file or directory"):
spack.installer.log(spec.package)
| def test_pkg_install_with_no_log_files(install_mockery):
"""Test the installer log function with no log file."""
# Get a basic concrete spec for the trivial install package.
spec = Spec('trivial-install-test-package').concretized()
# Attempt installing log without the build log file
with pytest.raises(IOError, match="No such file or directory"):
spack.installer.log(spec.package)
|
49,935 | def detect_motion(timeout_secs=10, noise_threshold=None, mask=None,
region=Region.ALL, frames=None):
"""Generator that yields a sequence of one `MotionResult` for each frame
processed from the device-under-test's video stream.
The `MotionResult` indicates whether any motion was detected -- that is,
any difference between two consecutive frames.
Use it in a ``for`` loop like this::
for motionresult in stbt.detect_motion():
...
In most cases you should use `wait_for_motion` instead.
:type timeout_secs: int or float or None
:param timeout_secs:
A timeout in seconds. After this timeout the iterator will be exhausted.
Thas is, a ``for`` loop like ``for m in detect_motion(timeout_secs=10)``
will terminate after 10 seconds. If ``timeout_secs`` is ``None`` then
the iterator will yield frames forever. Note that you can stop
iterating (for example with ``break``) at any time.
:param float noise_threshold:
The amount of noise to ignore. This is only useful with noisy analogue
video sources. Valid values range from 0 (all differences are
considered noise; a value of 0 will never report motion) to 1.0 (any
difference is considered motion).
This defaults to 0.84. You can override the global default value by
setting ``noise_threshold`` in the ``[motion]`` section of
:ref:`.stbt.conf`.
:type mask: str or `numpy.ndarray`
:param mask:
A black & white image that specifies which part of the image to search
for motion. White pixels select the area to analyse; black pixels select
the area to ignore. The mask must be the same size as the video frame.
This can be a string (a filename that will be resolved as per
`load_image`) or a single-channel image in OpenCV format.
:type region: `Region`
:param region:
Only analyze the specified region of the video frame.
If you specify both ``region`` and ``mask``, the mask must be the same
size as the region.
:type frames: Iterator[stbt.Frame]
:param frames: An iterable of video-frames to analyse. Defaults to
``stbt.frames()``.
| Added in v28: The ``region`` parameter.
| Added in v29: The ``frames`` parameter.
"""
if frames is None:
import stbt
frames = stbt.frames()
frames = limit_time(frames, timeout_secs) # pylint: disable=redefined-variable-type
if noise_threshold is None:
noise_threshold = get_config(
'motion', 'noise_threshold', type_=float)
debug("Searching for motion")
if mask is None:
mask = _ImageFromUser(None, None, None)
else:
mask = _load_image(mask, cv2.IMREAD_GRAYSCALE)
debug("Using mask %s" % mask.friendly_name)
frame = next(frames) # pylint:disable=stop-iteration-return
region = Region.intersect(_image_region(frame), region)
previous_frame_gray = cv2.cvtColor(crop(frame, region),
cv2.COLOR_BGR2GRAY)
if (mask.image is not None and
mask.image.shape[:2] != previous_frame_gray.shape[:2]):
raise ValueError(
"The dimensions of the mask '%s' %s don't match the "
"video frame %s" % (
mask.friendly_name, mask.image.shape,
previous_frame_gray.shape))
for frame in frames:
imglog = ImageLogger("detect_motion", region=region)
imglog.imwrite("source", frame)
imglog.set(roi=region, noise_threshold=noise_threshold)
frame_gray = cv2.cvtColor(crop(frame, region), cv2.COLOR_BGR2GRAY)
imglog.imwrite("gray", frame_gray)
imglog.imwrite("previous_frame_gray", previous_frame_gray)
absdiff = cv2.absdiff(frame_gray, previous_frame_gray)
previous_frame_gray = frame_gray
imglog.imwrite("absdiff", absdiff)
if mask.image is not None:
absdiff = cv2.bitwise_and(absdiff, mask.image)
imglog.imwrite("mask", mask.image)
imglog.imwrite("absdiff_masked", absdiff)
_, thresholded = cv2.threshold(
absdiff, int((1 - noise_threshold) * 255), 255,
cv2.THRESH_BINARY)
eroded = cv2.erode(
thresholded,
cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
imglog.imwrite("absdiff_threshold", thresholded)
imglog.imwrite("absdiff_threshold_erode", eroded)
out_region = pixel_bounding_box(eroded)
if out_region:
# Undo cv2.erode above:
out_region = out_region.extend(x=-1, y=-1)
# Undo crop:
out_region = out_region.translate(region.x, region.y)
motion = bool(out_region)
result = MotionResult(getattr(frame, "time", None), motion,
out_region, frame)
draw_on(frame, result, label="detect_motion()")
debug("%s found: %s" % (
"Motion" if motion else "No motion", str(result)))
_log_motion_image_debug(imglog, result)
yield result
| def detect_motion(timeout_secs=10, noise_threshold=None, mask=None,
region=Region.ALL, frames=None):
"""Generator that yields a sequence of one `MotionResult` for each frame
processed from the device-under-test's video stream.
The `MotionResult` indicates whether any motion was detected -- that is,
any difference between two consecutive frames.
Use it in a ``for`` loop like this::
for motionresult in stbt.detect_motion():
...
In most cases you should use `wait_for_motion` instead.
:type timeout_secs: int or float or None
:param timeout_secs:
A timeout in seconds. After this timeout the iterator will be exhausted.
Thas is, a ``for`` loop like ``for m in detect_motion(timeout_secs=10)``
will terminate after 10 seconds. If ``timeout_secs`` is ``None`` then
the iterator will yield frames forever. Note that you can stop
iterating (for example with ``break``) at any time.
:param float noise_threshold:
The amount of noise to ignore. This is only useful with noisy analogue
video sources. Valid values range from 0 (all differences are
considered noise; a value of 0 will never report motion) to 1.0 (any
difference is considered motion).
This defaults to 0.84. You can override the global default value by
setting ``noise_threshold`` in the ``[motion]`` section of
:ref:`.stbt.conf`.
:type mask: str or `numpy.ndarray`
:param mask:
A black & white image that specifies which part of the image to search
for motion. White pixels select the area to analyse; black pixels select
the area to ignore. The mask must be the same size as the video frame.
This can be a string (a filename that will be resolved as per
`load_image`) or a single-channel image in OpenCV format.
:type region: `Region`
:param region:
Only analyze the specified region of the video frame.
If you specify both ``region`` and ``mask``, the mask must be the same
size as the region.
:type frames: Iterator[stbt.Frame]
:param frames: An iterable of video-frames to analyse. Defaults to
``stbt.frames()``.
| Added in v28: The ``region`` parameter.
| Added in v29: The ``frames`` parameter.
"""
if frames is None:
import stbt
frames = stbt.frames()
frames = limit_time(frames, timeout_secs) # pylint: disable=redefined-variable-type
if noise_threshold is None:
noise_threshold = get_config(
'motion', 'noise_threshold', type_=float)
debug("Searching for motion")
if mask is None:
mask = _ImageFromUser(None, None, None)
else:
mask = _load_image(mask, cv2.IMREAD_GRAYSCALE)
debug("Using mask %s" % mask.friendly_name)
try:
frame = next(frames)
except StopIteration:
return
region = Region.intersect(_image_region(frame), region)
previous_frame_gray = cv2.cvtColor(crop(frame, region),
cv2.COLOR_BGR2GRAY)
if (mask.image is not None and
mask.image.shape[:2] != previous_frame_gray.shape[:2]):
raise ValueError(
"The dimensions of the mask '%s' %s don't match the "
"video frame %s" % (
mask.friendly_name, mask.image.shape,
previous_frame_gray.shape))
for frame in frames:
imglog = ImageLogger("detect_motion", region=region)
imglog.imwrite("source", frame)
imglog.set(roi=region, noise_threshold=noise_threshold)
frame_gray = cv2.cvtColor(crop(frame, region), cv2.COLOR_BGR2GRAY)
imglog.imwrite("gray", frame_gray)
imglog.imwrite("previous_frame_gray", previous_frame_gray)
absdiff = cv2.absdiff(frame_gray, previous_frame_gray)
previous_frame_gray = frame_gray
imglog.imwrite("absdiff", absdiff)
if mask.image is not None:
absdiff = cv2.bitwise_and(absdiff, mask.image)
imglog.imwrite("mask", mask.image)
imglog.imwrite("absdiff_masked", absdiff)
_, thresholded = cv2.threshold(
absdiff, int((1 - noise_threshold) * 255), 255,
cv2.THRESH_BINARY)
eroded = cv2.erode(
thresholded,
cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
imglog.imwrite("absdiff_threshold", thresholded)
imglog.imwrite("absdiff_threshold_erode", eroded)
out_region = pixel_bounding_box(eroded)
if out_region:
# Undo cv2.erode above:
out_region = out_region.extend(x=-1, y=-1)
# Undo crop:
out_region = out_region.translate(region.x, region.y)
motion = bool(out_region)
result = MotionResult(getattr(frame, "time", None), motion,
out_region, frame)
draw_on(frame, result, label="detect_motion()")
debug("%s found: %s" % (
"Motion" if motion else "No motion", str(result)))
_log_motion_image_debug(imglog, result)
yield result
|
57,753 | def fetch_incidents(last_run):
"""
Fetch incidents [IdentityIQ Alerts]
:type last_run: ``[Dict[str, str]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
"""
now = dt.datetime.now().replace(microsecond=0).isoformat()
last_processed = last_run.get('last_fetch', None)
# Compute the time frame for which the alerts will be requested.
if last_processed is None:
# Never processed, hence time filter window is MAX_ALERT_WINDOW (72 hrs) past from now
last_processed_past = (dt.datetime.strptime(now, DATE_FORMAT) + dt.timedelta(hours=-MAX_ALERT_WINDOW,
minutes=0)).isoformat()
last_processed = now
else:
now_formatted = dt.datetime.strptime(now, DATE_FORMAT)
last_processed_formatted = dt.datetime.strptime(last_processed, DATE_FORMAT)
diff = (now_formatted - last_processed_formatted).total_seconds() / 3600
if diff > MAX_ALERT_WINDOW:
# If the difference between the last run and this run is more than MAX_ALERT_WINDOW (72 hrs),
# then make it only run for past MAX_ALERT_WINDOW (72 hrs)
last_processed_past = (dt.datetime.strptime(now, DATE_FORMAT) + dt.timedelta(hours=-MAX_ALERT_WINDOW,
minutes=0)).isoformat()
last_processed = now
else:
# Else, run for only the delta time (1 min in case of normal execution)
last_processed_past = last_processed
last_processed = now
incidents = []
url = ''.join(
(IIQ_SCIM_ALERTS_URL, '?filter=(lastProcessed gt "', last_processed_past, '" and lastProcessed le "',
last_processed, '")'))
response = send_request(url, "GET", None)
if response is not None and 200 <= response.status_code < 300:
alerts = transform_object_list('IdentityIQ.Alert', response.json()['Resources'])
for alert in alerts:
if 'displayName' in alert:
incident_name = alert['displayName']
else:
incident_name = alert['name']
incident = {
'name': incident_name,
'details': alert['name'],
'occurred': alert['meta']['created'],
'rawJSON': json.dumps(alert)
}
incidents.append(incident)
next_run = {'last_fetch': now}
return next_run, incidents
| def fetch_incidents(last_run):
"""
Fetch incidents [IdentityIQ Alerts]
:type last_run: ``[Dict[str, str]]``
:param last_run:
A dict with a key containing the latest incident created time we got
from last fetch
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
"""
now = dt.datetime.now().replace(microsecond=0).isoformat()
last_processed = last_run.get('last_fetch', None)
# Compute the time frame for which the alerts will be requested.
if last_processed is None:
# Never processed, hence time filter window is MAX_ALERT_WINDOW (72 hrs) past from now
last_processed_past = (dt.datetime.strptime(now, DATE_FORMAT) + dt.timedelta(hours=-MAX_ALERT_WINDOW,
minutes=0)).isoformat()
last_processed = now
else:
now_formatted = dt.datetime.strptime(now, DATE_FORMAT)
last_processed_formatted = dt.datetime.strptime(last_processed, DATE_FORMAT)
diff = (now_formatted - last_processed_formatted).total_seconds() / 3600
if diff > MAX_ALERT_WINDOW:
# If the difference between the last run and this run is more than MAX_ALERT_WINDOW (72 hrs),
# then make it only run for past MAX_ALERT_WINDOW (72 hrs)
last_processed_past = (dt.datetime.strptime(now, DATE_FORMAT) + dt.timedelta(hours=-MAX_ALERT_WINDOW,
minutes=0)).isoformat()
last_processed = now
else:
# Else, run for only the delta time (1 min in case of normal execution)
last_processed_past = last_processed
last_processed = now
incidents = []
url = ''.join(
(IIQ_SCIM_ALERTS_URL, '?filter=(lastProcessed gt "', last_processed_past, '" and lastProcessed le "',
last_processed, '")'))
response = send_request(url, "GET", None)
if response is not None and 200 <= response.status_code < 300:
alerts = transform_object_list('IdentityIQ.Alert', response.json()['Resources'])
for alert in alerts:
if 'displayName' in alert:
incident_name = alert['displayName']
else:
incident_name = alert['name']
incident = {
'name': incident_name,
'details': alert.get('name'),
'occurred': alert.get('meta', {}).get('created'),
'rawJSON': json.dumps(alert)
}
incidents.append(incident)
next_run = {'last_fetch': now}
return next_run, incidents
|
51,515 | def _build_odbc_connection_string(**kwargs):
connection_string = ""
for k, v in kwargs.items():
if connection_string:
connection_string = "{};{}={}".format(connection_string, k, v)
else:
connection_string = "{}={}".format(k, v)
return connection_string
| def _build_odbc_connection_string(**kwargs):
return ";".join([f"{k}={v}" for k, v in kwargs.items()])
|
28,840 | def evaluate_annotation(
tp: Any,
globals: Dict[str, Any],
locals: Dict[str, Any],
cache: Dict[str, Any],
*,
implicit_str: bool = True,
):
if isinstance(tp, ForwardRef):
tp = tp.__forward_arg__
# ForwardRefs always evaluate their internals
implicit_str = True
if implicit_str and isinstance(tp, str):
if tp in cache:
return cache[tp]
evaluated = eval(tp, globals, locals)
cache[tp] = evaluated
return evaluate_annotation(evaluated, globals, locals, cache)
if hasattr(tp, '__args__'):
implicit_str = True
is_literal = False
args = tp.__args__
if not hasattr(tp, '__origin__'):
if PY_310 and tp.__class__ is types.Union:
converted = Union[args] # type: ignore
return evaluate_annotation(converted, globals, locals, cache)
return tp
if tp.__origin__ is Union:
try:
if args.index(type(None)) != len(args) - 1:
args = normalise_optional_params(tp.__args__)
except ValueError:
pass
if tp.__origin__ is Literal:
if not PY_310:
args = flatten_literal_params(tp.__args__)
implicit_str = False
is_literal = True
evaluated_args = tuple(evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args)
if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
raise TypeError('Literal arguments must be of type str, int, or bool.')
if evaluated_args == args:
return tp
try:
return tp.copy_with(evaluated_args)
except AttributeError:
return tp.__origin__[evaluated_args]
return tp
| def evaluate_annotation(
tp: Any,
globals: Dict[str, Any],
locals: Dict[str, Any],
cache: Dict[str, Any],
*,
implicit_str: bool = True,
):
if isinstance(tp, ForwardRef):
tp = tp.__forward_arg__
# ForwardRefs always evaluate their internals
implicit_str = True
if implicit_str and isinstance(tp, str):
if tp in cache:
return cache[tp]
evaluated = eval(tp, globals, locals)
cache[tp] = evaluated
return evaluate_annotation(evaluated, globals, locals, cache)
if hasattr(tp, '__args__'):
implicit_str = True
is_literal = False
args = tp.__args__
if not hasattr(tp, '__origin__'):
if PY_310 and tp.__class__ is types.Union:
converted = Union[args] # type: ignore
return evaluate_annotation(converted, globals, locals, cache)
return tp
if tp.__origin__ is Union:
try:
if args.index(type(None)) != len(args) - 1:
args = normalise_optional_params(tp.__args__)
except ValueError:
pass
if tp.__origin__ is Literal:
if not PY_310:
args = flatten_literal_params(tp.__args__)
implicit_str = False
is_literal = True
evaluated_args = tuple(evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args)
if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
raise TypeError('Literal arguments must be of type str, int, bool, or NoneType.')
if evaluated_args == args:
return tp
try:
return tp.copy_with(evaluated_args)
except AttributeError:
return tp.__origin__[evaluated_args]
return tp
|
32,061 | def main() -> None:
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
url = params.get('url') + '/v1/json/'
verify_certificate: bool = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client: Client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers)
commands = {
'camlytics-get-channels': channels_command,
'camlytics-get-events-totals-by-rule': eventstotalsrule_command,
'camlytics-get-events-totals-by-type': eventstotalstype_command,
'camlytics-get-events': events_command,
}
if command == 'test-module':
test_module(client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
| def main() -> None:
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
url = params.get('url') + '/v1/json/'
verify_certificate: bool = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client: Client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers)
commands = {
'camlytics-get-channels': channels_command,
'camlytics-get-events-totals-by-rule': eventstotalsrule_command,
'camlytics-get-events-totals-by-type': eventstotalstype_command,
'camlytics-get-events': events_command,
}
if command == 'test-module':
test_module(client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
|
43,812 | def _inner_out_flow_constraint_hamiltonian(
graph: nx.DiGraph, node: int
) -> Tuple[List[float], List[qml.operation.Observable]]:
r"""Calculates the expanded inner portion of the Hamiltonian in :func:`out_flow_constraint`.
For a given :math:`i`, this function returns:
.. math::
d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
( \sum_{j,(i,j)\in E}\hat{Z}_{ij}) )^{2}
Args:
graph (nx.DiGraph): the graph specifying possible edges
node (int): a fixed node
Returns:
Tuple[List[float], List[qml.operation.Observable]]: The list of coefficients and list of
observables of the inner part of the node-constraint Hamiltonian.
"""
coeffs = []
ops = []
edges_to_qubits = edges_to_wires(graph)
out_edges = graph.out_edges(node)
d = len(out_edges)
for edge in out_edges:
wire = (edges_to_qubits[edge],)
coeffs.append(1)
ops.append(qml.PauliZ(wire))
coeffs, ops = _square_hamiltonian_terms(coeffs, ops)
for edge in out_edges:
wire = (edges_to_qubits[edge],)
coeffs.append(-2 * (d - 1))
ops.append(qml.PauliZ(wire))
coeffs.append(d * (d - 2))
ops.append(qml.Identity(0))
coeffs, ops = _collect_duplicates(coeffs, ops)
hamiltonian = qml.Hamiltonian(coeffs, ops)
return hamiltonian
| def _inner_out_flow_constraint_hamiltonian(
graph: nx.DiGraph, node: int
) -> Tuple[List[float], List[qml.operation.Observable]]:
r"""Calculates the expanded inner portion of the Hamiltonian in :func:`out_flow_constraint`.
For a given :math:`i`, this function returns:
.. math::
d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
( \sum_{j,(i,j)\in E}\hat{Z}_{ij}) )^{2}
Args:
graph (nx.DiGraph): the graph specifying possible edges
node (int): a fixed node
Returns:
Tuple[List[float], List[qml.operation.Observable]]: The list of coefficients and list of
observables of the inner part of the node-constraint Hamiltonian.
"""
coeffs = []
ops = []
edges_to_qubits = edges_to_wires(graph)
out_edges = graph.out_edges(node)
d = len(out_edges)
for edge in out_edges:
wire = (edges_to_qubits[edge],)
coeffs.append(1)
ops.append(qml.PauliZ(wire))
coeffs, ops = _square_hamiltonian_terms(coeffs, ops)
for edge in out_edges:
wire = (edges_to_qubits[edge],)
coeffs.append(-2 * (d - 1))
ops.append(qml.PauliZ(wire))
coeffs.append(d * (d - 2))
ops.append(qml.Identity(0))
coeffs, ops = _collect_duplicates(coeffs, ops)
return qml.Hamiltonian(coeffs, ops)
|
25,901 | def validate_private_endpoint_connection_id(cmd, ns):
connection_id = ns.connection_id
connection_name = ns.connection_name
vault_name = ns.vault_name
if not connection_id:
if not all([connection_name, vault_name]):
raise argparse.ArgumentError(
None, 'specify both: --connection-name and --vault-name')
ns.resource_group_name = _get_resource_group_from_vault_name(cmd.cli_ctx, vault_name)
else:
if any([connection_name, vault_name]):
raise argparse.ArgumentError(
None, 'you don\'t need to specify --connection-name or --vault-name if --connection-id is specified')
id_parts = connection_id.split('/')
ns.connection_name = id_parts[-1]
ns.vault_name = id_parts[-3]
ns.resource_group_name = id_parts[-7]
| def validate_private_endpoint_connection_id(cmd, ns):
connection_id = ns.connection_id
connection_name = ns.connection_name
vault_name = ns.vault_name
if not connection_id:
if not all([connection_name, vault_name]):
raise argparse.ArgumentError(
None, 'specify both: --connection-name and --name')
ns.resource_group_name = _get_resource_group_from_vault_name(cmd.cli_ctx, vault_name)
else:
if any([connection_name, vault_name]):
raise argparse.ArgumentError(
None, 'you don\'t need to specify --connection-name or --vault-name if --connection-id is specified')
id_parts = connection_id.split('/')
ns.connection_name = id_parts[-1]
ns.vault_name = id_parts[-3]
ns.resource_group_name = id_parts[-7]
|
2,644 | def test_adjusted_rand_score_overflow():
"""Check that large amount of data will not lead to overflow in
`adjusted_rand_score`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20305
"""
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, 100_000, dtype=np.int8)
y_pred = rng.randint(0, 2, 100_000, dtype=np.int8)
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
adjusted_rand_score(y_true, y_pred)
| def test_adjusted_rand_score_overflow():
"""Check that large amount of data will not lead to overflow in
`adjusted_rand_score`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/20305
"""
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, 100_000, dtype=np.int8)
y_pred = rng.randint(0, 2, 100_000, dtype=np.int8)
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
adjusted_rand_score(y_true, y_pred)
|
24,643 | def null_point_find(
x_arr=None,
y_arr=None,
z_arr=None,
u_arr=None,
v_arr=None,
w_arr=None,
MAX_ITERATIONS=500,
err=1e-10,
):
r"""
Returns an array of nullpoint object, representing
the nullpoints of the given vector space.
Parameters
----------
x_arr: array_like
The array representing the coordinates in the x-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
y_arr: array_like
The array representing the coordinates in the y-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
z_arr: array_like
The array representing the coordinates in the z-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
u_arr: array_like
A 3D array containing the x-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
v_arr: array_like
A 3D array containing the y-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
w_arr: array_like
A 3D array containing the z-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
MAX_ITERATIONS: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
The threshold/error that determines if convergence has occured
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
Notes
-------
This method is described by :cite:t:`haynes:2007`.
"""
# Constructing the vspace
vspace = _vector_space(
x_arr,
y_arr,
z_arr,
None,
None,
None,
u_arr,
v_arr,
w_arr,
None,
None,
)
return _vspace_iterator(vspace, MAX_ITERATIONS, err)
| def null_point_find(
x_arr=None,
y_arr=None,
z_arr=None,
u_arr=None,
v_arr=None,
w_arr=None,
MAX_ITERATIONS=500,
err=1e-10,
):
r"""
Returns an array of nullpoint object, representing
the nullpoints of the given vector space.
Parameters
----------
x_arr: array_like
The array representing the coordinates in the x-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
y_arr: array_like
The array representing the coordinates in the y-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
z_arr: array_like
The array representing the coordinates in the z-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
u_arr: array_like
A 3D array containing the x-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
v_arr: array_like
A 3D array containing the y-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
w_arr: array_like
A 3D array containing the z-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
MAX_ITERATIONS: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
The threshold/error that determines if convergence has occured
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
Notes
-----
This method is described by :cite:t:`haynes:2007`.
"""
# Constructing the vspace
vspace = _vector_space(
x_arr,
y_arr,
z_arr,
None,
None,
None,
u_arr,
v_arr,
w_arr,
None,
None,
)
return _vspace_iterator(vspace, MAX_ITERATIONS, err)
|
34,605 | def _generate_lookup_regex(
lookup_table: Dict[Text, Union[Text, List[Text]]],
config_language: Text = None,
) -> Text:
"""Creates a regex pattern from the given lookup table.
The lookup table is either a file or a list of entries.
Args:
lookup_table: The lookup table.
Returns:
The regex pattern.
"""
lookup_elements = lookup_table["elements"]
# if it's a list, it should be the elements directly
if isinstance(lookup_elements, list):
elements_to_regex = lookup_elements
# otherwise it's a file path.
else:
elements_to_regex = read_lookup_table_file(lookup_elements)
# sanitize the regex, escape special characters
elements_sanitized = [re.escape(e) for e in elements_to_regex]
not_boundary_language = ["zh", "ja", "th"]
if str(config_language).lower in not_boundary_language:
return "(" + "|".join(elements_sanitized) + ")"
# regex matching elements with word boundaries on either side
return "(\\b" + "\\b|\\b".join(elements_sanitized) + "\\b)"
| def _generate_lookup_regex(
lookup_table: Dict[Text, Union[Text, List[Text]]],
config_language: Text = None,
) -> Text:
"""Creates a regex pattern from the given lookup table.
The lookup table is either a file or a list of entries.
Args:
lookup_table: The lookup table.
Returns:
The regex pattern.
"""
lookup_elements = lookup_table["elements"]
# if it's a list, it should be the elements directly
if isinstance(lookup_elements, list):
elements_to_regex = lookup_elements
# otherwise it's a file path.
else:
elements_to_regex = read_lookup_table_file(lookup_elements)
# sanitize the regex, escape special characters
elements_sanitized = [re.escape(e) for e in elements_to_regex]
not_boundary_language = ["zh", "ja", "th"]
if str(config_language).lower() in not_boundary_language:
return "(" + "|".join(elements_sanitized) + ")"
# regex matching elements with word boundaries on either side
return "(\\b" + "\\b|\\b".join(elements_sanitized) + "\\b)"
|
32,580 | def http_request(method, url, data=None, headers={'Accept': 'application/json'}, url_params=None):
# send http request using user settings for unsecure and proxy parameters
# uses basic auth
# returns the http response
LOG('Attempting {} request to {}'.format(method, url))
try:
response = requests.request(
method,
url,
headers=headers,
data=data,
auth=(USERNAME, PASSWORD),
params=url_params,
verify=USE_SSL,
proxies=PROXIES
)
except requests.exceptions.SSLError as e:
LOG(e)
raise ValueError('An SSL error occurred. Consider to set unsecure')
if is_html_response(response):
html_body = get_html_from_response(response)
demisto.results(html_error_entry(html_body))
raise ValueError('Caught HTML response, please verify server url.')
if response.status_code != 200:
msg = parse_error_response(response)
raise ValueError(msg)
try:
return response.json()
except Exception as e:
LOG(e)
return {}
| def http_request(method, url, data=None, headers={'Accept': 'application/json'}, url_params=None):
# send http request using user settings for unsecure and proxy parameters
# uses basic auth
# returns the http response
LOG('Attempting {} request to {}'.format(method, url))
try:
response = requests.request(
method,
url,
headers=headers,
data=data,
auth=(USERNAME, PASSWORD),
params=url_params,
verify=USE_SSL,
proxies=PROXIES
)
except requests.exceptions.SSLError as e:
LOG(e)
raise ValueError('An SSL error occurred. Consider to set unsecure')
if is_html_response(response):
html_body = get_html_from_response(response)
demisto.results(html_error_entry(html_body))
raise ValueError('Caught HTML response, please verify server url.')
if response.status_code < 200 or response.status_code >= 300:
msg = parse_error_response(response)
raise ValueError(msg)
try:
return response.json()
except Exception as e:
LOG(e)
return {}
|
20,553 | def test_splitext():
assert msct_image.splitext('image.nii') == ('image', '.nii')
assert msct_image.splitext('image.nii.gz') == ('image', '.nii.gz')
assert msct_image.splitext('folder/image.nii.gz') == (os.path.join('folder', 'image'), '.nii.gz')
assert msct_image.splitext('nice.image.nii.gz') == ('nice.image', '.nii.gz')
assert msct_image.splitext('nice.folder/image.nii.gz') == (os.path.join('nice.folder', 'image'), '.nii.gz')
assert msct_image.splitext('image.tar.gz') == ('image', '.tar.gz')
| def test_splitext():
assert msct_image.splitext('image.nii') == ('image', '.nii')
assert msct_image.splitext('image.nii.gz') == ('image', '.nii.gz')
assert msct_image.splitext('folder/image.nii.gz') == (os.path.join('folder', 'image'), '.nii.gz')
assert msct_image.splitext('nice.image.nii.gz') == ('nice.image', '.nii.gz')
assert msct_image.splitext(os.path.join('nice.folder', 'image.nii.gz')) == (os.path.join('nice.folder', 'image'), '.nii.gz')
assert msct_image.splitext('image.tar.gz') == ('image', '.tar.gz')
|
54,397 | def install(requirements: str | list[str], keep_going: bool = False, deps: bool = True):
"""Install the given package and all of its dependencies.
See :ref:`loading packages <loading_packages>` for more information.
This only works for packages that are either pure Python or for packages
with C extensions that are built in Pyodide. If a pure Python package is not
found in the Pyodide repository it will be loaded from PyPI.
When used in web browsers, downloads from PyPI will be cached. When run in
Node.js, packages are currently not cached, and will be re-downloaded each
time ``micropip.install`` is run.
Parameters
----------
requirements : ``str | List[str]``
A requirement or list of requirements to install. Each requirement is a
string, which should be either a package name or URL to a wheel:
- If the requirement ends in ``.whl`` it will be interpreted as a URL.
The file must be a wheel named in compliance with the
`PEP 427 naming convention <https://www.python.org/dev/peps/pep-0427/#file-format>`_.
- If the requirement does not end in ``.whl``, it will interpreted as the
name of a package. A package by this name must either be present in the
Pyodide repository at `indexURL <globalThis.loadPyodide>` or on PyPI
keep_going : ``bool``, default: False
This parameter decides the behavior of the micropip when it encounters a
Python package without a pure Python wheel while doing dependency
resolution:
- If ``False``, an error will be raised on first package with a missing wheel.
- If ``True``, the micropip will keep going after the first error, and report a list
of errors at the end.
deps : ``bool``, default: True
If ``True``, install dependencies of the given packages specified in METADATA file.
If ``False``, install the given packages without dependencies.
Returns
-------
``Future``
A ``Future`` that resolves to ``None`` when all packages have been
downloaded and installed.
"""
importlib.invalidate_caches()
return asyncio.ensure_future(
PACKAGE_MANAGER.install(requirements, keep_going=keep_going, deps=deps)
)
| def install(requirements: str | list[str], keep_going: bool = False, deps: bool = True):
"""Install the given package and all of its dependencies.
See :ref:`loading packages <loading_packages>` for more information.
This only works for packages that are either pure Python or for packages
with C extensions that are built in Pyodide. If a pure Python package is not
found in the Pyodide repository it will be loaded from PyPI.
When used in web browsers, downloads from PyPI will be cached. When run in
Node.js, packages are currently not cached, and will be re-downloaded each
time ``micropip.install`` is run.
Parameters
----------
requirements : ``str | List[str]``
A requirement or list of requirements to install. Each requirement is a
string, which should be either a package name or URL to a wheel:
- If the requirement ends in ``.whl`` it will be interpreted as a URL.
The file must be a wheel named in compliance with the
`PEP 427 naming convention <https://www.python.org/dev/peps/pep-0427/#file-format>`_.
- If the requirement does not end in ``.whl``, it will interpreted as the
name of a package. A package by this name must either be present in the
Pyodide repository at `indexURL <globalThis.loadPyodide>` or on PyPI
keep_going : ``bool``, default: False
This parameter decides the behavior of the micropip when it encounters a
Python package without a pure Python wheel while doing dependency
resolution:
- If ``False``, an error will be raised on first package with a missing wheel.
- If ``True``, the micropip will keep going after the first error, and report a list
of errors at the end.
deps : ``bool``, default: True
If ``True``, install dependencies specified in METADATA file for
each package. Otherwise do not install dependencies.
Returns
-------
``Future``
A ``Future`` that resolves to ``None`` when all packages have been
downloaded and installed.
"""
importlib.invalidate_caches()
return asyncio.ensure_future(
PACKAGE_MANAGER.install(requirements, keep_going=keep_going, deps=deps)
)
|
31,257 | def main() -> None:
params: any = demisto.params()
host: str = params.get('host')
port: int = int(params.get('port'))
args: any = demisto.args()
if "host" in args and "port" in args:
host: str = args.get('host')
port: int = int(args.get('port'))
command: str = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'arduino-set-pin': arduino_set_pin_command,
'arduino-get-pin': arduino_get_pin_command,
'arduino-send-data': arduino_send_data_command
}
# try:
server: Server = Server(host, port)
if demisto.command() == 'test-module':
return_results(test_module(server))
elif command in commands:
return_results(commands[command](server, args))
else:
return_error(f"{command} command not recognised")
| def main() -> None:
params: any = demisto.params()
host: str = params.get('host')
port: int = int(params.get('port'))
args = demisto.args()
if "host" in args and "port" in args:
host: str = args.get('host')
port: int = int(args.get('port'))
command: str = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'arduino-set-pin': arduino_set_pin_command,
'arduino-get-pin': arduino_get_pin_command,
'arduino-send-data': arduino_send_data_command
}
# try:
server: Server = Server(host, port)
if demisto.command() == 'test-module':
return_results(test_module(server))
elif command in commands:
return_results(commands[command](server, args))
else:
return_error(f"{command} command not recognised")
|
46,395 | def run(dataset: Dataset, config: TaskConfig):
from frameworks.shared.caller import run_in_venv
data = dict(
train=dict(path=dataset.train.path),
test=dict(path=dataset.test.path),
target=dict(index=dataset.target.index),
domains=dict(cardinalities=[0 if f.values is None else len(f.values) for f in dataset.features]),
format=dataset.train.format
)
config.ext.monitoring = rconfig().monitoring
return run_in_venv(__file__, "exec.py",
input_data=data, dataset=dataset, config=config)
| def run(dataset: Dataset, config: TaskConfig):
from frameworks.shared.caller import run_in_venv
data = dict(
train=dict(path=dataset.train.path),
test=dict(path=dataset.test.path),
target=dict(index=dataset.target.index),
domains=dict(cardinalities=[0 if f.values is None else len(f.values) for f in dataset.features]),
format=dataset.train.format,
)
config.ext.monitoring = rconfig().monitoring
return run_in_venv(__file__, "exec.py",
input_data=data, dataset=dataset, config=config)
|
17,354 | def convert_units(obj, to):
if isinstance(obj, xr.Dataset):
data_vars = {
name: convert_units(array, to) for name, array in obj.data_vars.items()
}
coords = {name: convert_units(array, to) for name, array in obj.coords.items()}
attrs = obj.attrs
new_obj = xr.Dataset(data_vars=data_vars, coords=coords, attrs=attrs)
elif isinstance(obj, xr.DataArray):
name = obj.name
new_units = (
to.get(name, None) or to.get("data", None) or to.get(None, None) or 1
)
data = convert_units(obj.data, {None: new_units})
coords = {
name: (array.dims, convert_units(array.data, to))
for name, array in obj.coords.items()
if name != obj.name
}
dims = obj.dims
attrs = obj.attrs
new_obj = xr.DataArray(name=name, data=data, coords=coords, dims=dims)
elif isinstance(obj, unit_registry.Quantity):
units = to.get(None)
new_obj = obj.to(units) if units is not None else obj
else:
new_obj = obj
return new_obj
| def convert_units(obj, to):
if isinstance(obj, xr.Dataset):
data_vars = {
name: convert_units(array, to) for name, array in obj.data_vars.items()
}
coords = {name: convert_units(array, to) for name, array in obj.coords.items()}
attrs = obj.attrs
new_obj = xr.Dataset(data_vars=data_vars, coords=coords, attrs=attrs)
elif isinstance(obj, xr.DataArray):
name = obj.name
new_units = (
to.get(name, None) or to.get("data", None) or to.get(None, None) or 1
)
data = convert_units(obj.data, {None: new_units})
coords = {
name: (array.dims, convert_units(array.data, to))
for name, array in obj.coords.items()
if name != obj.name
}
dims = obj.dims
attrs = obj.attrs
new_obj = obj.copy(data=data, coords=coords)
elif isinstance(obj, unit_registry.Quantity):
units = to.get(None)
new_obj = obj.to(units) if units is not None else obj
else:
new_obj = obj
return new_obj
|
46,140 | def get_server(panel, port=0, address=None, websocket_origin=None,
loop=None, show=False, start=False, title=None,
verbose=False, location=True, static_dirs={},
oauth_provider=None, oauth_key=None, oauth_secret=None,
oauth_extra_params={}, cookie_secret=None,
oauth_encryption_key=None, session_history=None, **kwargs):
"""
Returns a Server instance with this panel attached as the root
app.
Arguments
---------
panel: Viewable, function or {str: Viewable}
A Panel object, a function returning a Panel object or a
dictionary mapping from the URL slug to either.
port: int (optional, default=0)
Allows specifying a specific port
address : str
The address the server should listen on for HTTP requests.
websocket_origin: str or list(str) (optional)
A list of hosts that can connect to the websocket.
This is typically required when embedding a server app in
an external web site.
If None, "localhost" is used.
loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
The tornado IOLoop to run the Server on.
show : boolean (optional, default=False)
Whether to open the server in a new browser tab on start.
start : boolean(optional, default=False)
Whether to start the Server.
title : str or {str: str} (optional, default=None)
An HTML title for the application or a dictionary mapping
from the URL slug to a customized title.
verbose: boolean (optional, default=False)
Whether to report the address and port.
location : boolean or panel.io.location.Location
Whether to create a Location component to observe and
set the URL location.
static_dirs: dict (optional, default={})
A dictionary of routes and local paths to serve as static file
directories on those routes.
oauth_provider: str
One of the available OAuth providers
oauth_key: str (optional, default=None)
The public OAuth identifier
oauth_secret: str (optional, default=None)
The client secret for the OAuth provider
oauth_extra_params: dict (optional, default={})
Additional information for the OAuth provider
cookie_secret: str (optional, default=None)
A random secret string to sign cookies (required for OAuth)
oauth_encryption_key: str (optional, default=False)
A random encryption key used for encrypting OAuth user
information and access tokens.
session_history: int (optional, default=None)
The amount of session history to accumulate. If set to non-zero
and non-None value will launch a REST endpoint at
/rest/session_info, which returns information about the session
history.
kwargs: dict
Additional keyword arguments to pass to Server instance.
Returns
-------
server : bokeh.server.server.Server
Bokeh Server instance running this panel
"""
from ..config import config
from .rest import REST_PROVIDERS
server_id = kwargs.pop('server_id', uuid.uuid4().hex)
kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])
if isinstance(panel, dict):
apps = {}
for slug, app in panel.items():
if isinstance(title, dict):
try:
title_ = title[slug]
except KeyError:
raise KeyError(
"Keys of the title dictionnary and of the apps "
f"dictionary must match. No {slug} key found in the "
"title dictionnary.")
else:
title_ = title
slug = slug if slug.startswith('/') else '/'+slug
if 'flask' in sys.modules:
from flask import Flask
if isinstance(app, Flask):
wsgi = WSGIContainer(app)
if slug == '/':
raise ValueError('Flask apps must be served on a subpath.')
if not slug.endswith('/'):
slug += '/'
extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,
dict(fallback=wsgi, proxy=slug)))
continue
if isinstance(app, pathlib.Path):
app = str(app) # enables serving apps from Paths
if isinstance(app, str) and (app.endswith(".py") or app.endswith(".ipynb")):
apps[slug] = build_single_handler_application(app)
else:
apps[slug] = Application(FunctionHandler(partial(_eval_panel, app, server_id, title_, location)))
else:
apps = {'/': Application(FunctionHandler(partial(_eval_panel, panel, server_id, title, location)))}
extra_patterns += get_static_routes(static_dirs)
if session_history is not None:
config.session_history = session_history
if config.session_history != 0:
pattern = REST_PROVIDERS['param']([], 'rest')
extra_patterns.extend(pattern)
state.publish('session_info', state, ['session_info'])
opts = dict(kwargs)
if loop:
loop.make_current()
opts['io_loop'] = loop
elif opts.get('num_procs', 1) == 1:
opts['io_loop'] = IOLoop.current()
if 'index' not in opts:
opts['index'] = INDEX_HTML
if address is not None:
opts['address'] = address
if websocket_origin:
if not isinstance(websocket_origin, list):
websocket_origin = [websocket_origin]
opts['allow_websocket_origin'] = websocket_origin
# Configure OAuth
from ..config import config
if config.oauth_provider:
from ..auth import OAuthProvider
opts['auth_provider'] = OAuthProvider()
if oauth_provider:
config.oauth_provider = oauth_provider
if oauth_key:
config.oauth_key = oauth_key
if oauth_extra_params:
config.oauth_extra_params = oauth_extra_params
if cookie_secret:
config.cookie_secret = cookie_secret
opts['cookie_secret'] = config.cookie_secret
server = Server(apps, port=port, **opts)
if verbose:
address = server.address or 'localhost'
print("Launching server at http://%s:%s" % (address, server.port))
state._servers[server_id] = (server, panel, [])
if show:
def show_callback():
server.show('/login' if config.oauth_provider else '/')
server.io_loop.add_callback(show_callback)
def sig_exit(*args, **kwargs):
server.io_loop.add_callback_from_signal(do_stop)
def do_stop(*args, **kwargs):
server.io_loop.stop()
try:
signal.signal(signal.SIGINT, sig_exit)
except ValueError:
pass # Can't use signal on a thread
if start:
server.start()
try:
server.io_loop.start()
except RuntimeError:
pass
return server
| def get_server(panel, port=0, address=None, websocket_origin=None,
loop=None, show=False, start=False, title=None,
verbose=False, location=True, static_dirs={},
oauth_provider=None, oauth_key=None, oauth_secret=None,
oauth_extra_params={}, cookie_secret=None,
oauth_encryption_key=None, session_history=None, **kwargs):
"""
Returns a Server instance with this panel attached as the root
app.
Arguments
---------
panel: Viewable, function or {str: Viewable}
A Panel object, a function returning a Panel object or a
dictionary mapping from the URL slug to either.
port: int (optional, default=0)
Allows specifying a specific port
address : str
The address the server should listen on for HTTP requests.
websocket_origin: str or list(str) (optional)
A list of hosts that can connect to the websocket.
This is typically required when embedding a server app in
an external web site.
If None, "localhost" is used.
loop : tornado.ioloop.IOLoop (optional, default=IOLoop.current())
The tornado IOLoop to run the Server on.
show : boolean (optional, default=False)
Whether to open the server in a new browser tab on start.
start : boolean(optional, default=False)
Whether to start the Server.
title : str or {str: str} (optional, default=None)
An HTML title for the application or a dictionary mapping
from the URL slug to a customized title.
verbose: boolean (optional, default=False)
Whether to report the address and port.
location : boolean or panel.io.location.Location
Whether to create a Location component to observe and
set the URL location.
static_dirs: dict (optional, default={})
A dictionary of routes and local paths to serve as static file
directories on those routes.
oauth_provider: str
One of the available OAuth providers
oauth_key: str (optional, default=None)
The public OAuth identifier
oauth_secret: str (optional, default=None)
The client secret for the OAuth provider
oauth_extra_params: dict (optional, default={})
Additional information for the OAuth provider
cookie_secret: str (optional, default=None)
A random secret string to sign cookies (required for OAuth)
oauth_encryption_key: str (optional, default=False)
A random encryption key used for encrypting OAuth user
information and access tokens.
session_history: int (optional, default=None)
The amount of session history to accumulate. If set to non-zero
and non-None value will launch a REST endpoint at
/rest/session_info, which returns information about the session
history.
kwargs: dict
Additional keyword arguments to pass to Server instance.
Returns
-------
server : bokeh.server.server.Server
Bokeh Server instance running this panel
"""
from ..config import config
from .rest import REST_PROVIDERS
server_id = kwargs.pop('server_id', uuid.uuid4().hex)
kwargs['extra_patterns'] = extra_patterns = kwargs.get('extra_patterns', [])
if isinstance(panel, dict):
apps = {}
for slug, app in panel.items():
if isinstance(title, dict):
try:
title_ = title[slug]
except KeyError:
raise KeyError(
"Keys of the title dictionnary and of the apps "
f"dictionary must match. No {slug} key found in the "
"title dictionary.")
else:
title_ = title
slug = slug if slug.startswith('/') else '/'+slug
if 'flask' in sys.modules:
from flask import Flask
if isinstance(app, Flask):
wsgi = WSGIContainer(app)
if slug == '/':
raise ValueError('Flask apps must be served on a subpath.')
if not slug.endswith('/'):
slug += '/'
extra_patterns.append(('^'+slug+'.*', ProxyFallbackHandler,
dict(fallback=wsgi, proxy=slug)))
continue
if isinstance(app, pathlib.Path):
app = str(app) # enables serving apps from Paths
if isinstance(app, str) and (app.endswith(".py") or app.endswith(".ipynb")):
apps[slug] = build_single_handler_application(app)
else:
apps[slug] = Application(FunctionHandler(partial(_eval_panel, app, server_id, title_, location)))
else:
apps = {'/': Application(FunctionHandler(partial(_eval_panel, panel, server_id, title, location)))}
extra_patterns += get_static_routes(static_dirs)
if session_history is not None:
config.session_history = session_history
if config.session_history != 0:
pattern = REST_PROVIDERS['param']([], 'rest')
extra_patterns.extend(pattern)
state.publish('session_info', state, ['session_info'])
opts = dict(kwargs)
if loop:
loop.make_current()
opts['io_loop'] = loop
elif opts.get('num_procs', 1) == 1:
opts['io_loop'] = IOLoop.current()
if 'index' not in opts:
opts['index'] = INDEX_HTML
if address is not None:
opts['address'] = address
if websocket_origin:
if not isinstance(websocket_origin, list):
websocket_origin = [websocket_origin]
opts['allow_websocket_origin'] = websocket_origin
# Configure OAuth
from ..config import config
if config.oauth_provider:
from ..auth import OAuthProvider
opts['auth_provider'] = OAuthProvider()
if oauth_provider:
config.oauth_provider = oauth_provider
if oauth_key:
config.oauth_key = oauth_key
if oauth_extra_params:
config.oauth_extra_params = oauth_extra_params
if cookie_secret:
config.cookie_secret = cookie_secret
opts['cookie_secret'] = config.cookie_secret
server = Server(apps, port=port, **opts)
if verbose:
address = server.address or 'localhost'
print("Launching server at http://%s:%s" % (address, server.port))
state._servers[server_id] = (server, panel, [])
if show:
def show_callback():
server.show('/login' if config.oauth_provider else '/')
server.io_loop.add_callback(show_callback)
def sig_exit(*args, **kwargs):
server.io_loop.add_callback_from_signal(do_stop)
def do_stop(*args, **kwargs):
server.io_loop.stop()
try:
signal.signal(signal.SIGINT, sig_exit)
except ValueError:
pass # Can't use signal on a thread
if start:
server.start()
try:
server.io_loop.start()
except RuntimeError:
pass
return server
|
20,204 | def purge(url=None):
akamai_config = settings.WAGTAILFRONTENDCACHE.get('akamai', {})
cloudfront_config = settings.WAGTAILFRONTENDCACHE.get(
'cloudfront', {})
if url:
# Use the Wagtail frontendcache PurgeBatch to perform the purge
batch = PurgeBatch()
batch.add_url(url)
# If the URL matches any of our cloudfront distributions, invalidate
# with that backend
if any(k for k in cloudfront_config.get('DISTRIBUTION_ID', {})
if k in url):
logger.info('Purging {} from cloudfront'.format(url))
batch.purge(backends='cloudfront')
# Otherwise invalidate with our default backend
else:
logger.info('Purging {} from akamai'.format(url))
batch.purge(backends='akamai')
return "Submitted invalidation for %s" % url
else:
# purge_all only exists on our AkamaiBackend
backend = AkamaiBackend(akamai_config)
logger.info('Purging entire site from akamai')
backend.purge_all()
return "Submitted invalidation for the entire site."
| def purge(url=None):
akamai_config = settings.WAGTAILFRONTENDCACHE.get('akamai', {})
cloudfront_config = settings.WAGTAILFRONTENDCACHE.get(
'cloudfront', {})
if url:
# Use the Wagtail frontendcache PurgeBatch to perform the purge
batch = PurgeBatch()
batch.add_url(url)
# If the URL matches any of our cloudfront distributions, invalidate
# with that backend
if any(k for k in cloudfront_config.get('DISTRIBUTION_ID', {})
if k in url):
logger.info('Purging {} from cloudfront'.format(url))
batch.purge(backends='cloudfront')
# Otherwise invalidate with our default backend
else:
logger.info('Purging {} from akamai'.format(url))
batch.purge(backends='akamai')
return "Submitted invalidation for %s" % url
else:
# purge_all only exists on our AkamaiBackend
logger.info('Purging entire site from Akamai')
logger.info('Purging entire site from akamai')
backend.purge_all()
return "Submitted invalidation for the entire site."
|
9,405 | def main():
endpoint = "ca/host_key_cert"
key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"]
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
ca=dict(type='str', required=True),
meta=dict(type='str', required=True),
certificate=dict(type='str', required=True),
comment=dict(type='str', required=False),
encrypted=dict(type='bool', required=False, default=False),
key=dict(type='str', required=False),
)
)
try:
# This is needed because the bool value only accepts int values in the backend
UTM(module, endpoint, key_to_check_for_changes).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
| def main():
endpoint = "ca/host_key_cert"
key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"]
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
ca=dict(type='str', required=True),
meta=dict(type='str', required=True),
certificate=dict(type='str', required=True),
comment=dict(type='str', required=False),
encrypted=dict(type='bool', required=False, default=False),
key=dict(type='str', required=False, no_log=True),
)
)
try:
# This is needed because the bool value only accepts int values in the backend
UTM(module, endpoint, key_to_check_for_changes).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
|
2,710 | def pairwise_distances(
X, Y=None, metric="euclidean", *, n_jobs=None, force_all_finite=True, **kwds
):
"""Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix
inputs.
['nan_euclidean'] but it does not yet support sparse matrices.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see :func:`sklearn.metrics.pairwise.pairwise_distances`
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise distances between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. Only allowed if
metric != "precomputed".
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored
for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_samples_Y)
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
See Also
--------
pairwise_distances_chunked : Performs the same calculation as this
function, but returns a generator of chunks of the distance matrix, in
order to limit memory usage.
paired_distances : Computes the distances between corresponding elements
of two arrays.
"""
if (
metric not in _VALID_METRICS
and not callable(metric)
and metric != "precomputed"
):
raise ValueError(
"Unknown metric %s. Valid metrics are %s, or 'precomputed', or a callable"
% (metric, _VALID_METRICS)
)
if metric == "precomputed":
X, _ = check_pairwise_arrays(
X, Y, precomputed=True, force_all_finite=force_all_finite
)
whom = (
"`pairwise_distances`. Precomputed distance "
" need to have non-negative values."
)
check_non_negative(X, whom=whom)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(
_pairwise_callable, metric=metric, force_all_finite=force_all_finite, **kwds
)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
if dtype == bool and (X.dtype != bool or (Y is not None and Y.dtype != bool)):
msg = "Data was converted to boolean for metric %s" % metric
warnings.warn(msg, DataConversionWarning)
X, Y = check_pairwise_arrays(
X, Y, dtype=dtype, force_all_finite=force_all_finite
)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
if effective_n_jobs(n_jobs) == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric, **kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| def pairwise_distances(
X, Y=None, metric="euclidean", *, n_jobs=None, force_all_finite=True, **kwds
):
"""Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix
inputs.
['nan_euclidean'] but it does not yet support sparse matrices.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see :func:`sklearn.metrics.pairwise.distance_metrics`
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_features)
Array of pairwise distances between samples, or a feature array.
The shape of the array should be (n_samples_X, n_samples_X) if
metric == "precomputed" and (n_samples_X, n_features) otherwise.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. Only allowed if
metric != "precomputed".
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
force_all_finite : bool or 'allow-nan', default=True
Whether to raise an error on np.inf, np.nan, pd.NA in array. Ignored
for a metric listed in ``pairwise.PAIRWISE_DISTANCE_FUNCTIONS``. The
possibilities are:
- True: Force all values of array to be finite.
- False: accepts np.inf, np.nan, pd.NA in array.
- 'allow-nan': accepts only np.nan and pd.NA values in array. Values
cannot be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : ndarray of shape (n_samples_X, n_samples_X) or \
(n_samples_X, n_samples_Y)
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
See Also
--------
pairwise_distances_chunked : Performs the same calculation as this
function, but returns a generator of chunks of the distance matrix, in
order to limit memory usage.
paired_distances : Computes the distances between corresponding elements
of two arrays.
"""
if (
metric not in _VALID_METRICS
and not callable(metric)
and metric != "precomputed"
):
raise ValueError(
"Unknown metric %s. Valid metrics are %s, or 'precomputed', or a callable"
% (metric, _VALID_METRICS)
)
if metric == "precomputed":
X, _ = check_pairwise_arrays(
X, Y, precomputed=True, force_all_finite=force_all_finite
)
whom = (
"`pairwise_distances`. Precomputed distance "
" need to have non-negative values."
)
check_non_negative(X, whom=whom)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(
_pairwise_callable, metric=metric, force_all_finite=force_all_finite, **kwds
)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
if dtype == bool and (X.dtype != bool or (Y is not None and Y.dtype != bool)):
msg = "Data was converted to boolean for metric %s" % metric
warnings.warn(msg, DataConversionWarning)
X, Y = check_pairwise_arrays(
X, Y, dtype=dtype, force_all_finite=force_all_finite
)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
if effective_n_jobs(n_jobs) == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric, **kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
36,184 | def custom404_view(request, exception):
"""
This view handlers which 404 template to render, based on
which host the request was a 404 for. We do this, because
wagtail does not allow us to (currently) specify 404 pages
using the site admin UI, so we need to rely on the Django
methodology for handling 404 responses.
It would be great if we could pull the "which domain uses
which 404 template" information from the wagtail "sites"
configuration, but there is no way to know which template
belongs to which site, as "a site" is not tied to "a django
app" in the wagtail way of things.
"""
if request.site.hostname == 'mozillafestival.org':
html = render(request, 'mozfest/404.html')
return HttpResponseNotFound(html)
else:
html = render(request, '404.html')
return HttpResponseNotFound(html)
| def custom404_view(request, exception):
"""
This view handlers which 404 template to render, based on
which host the request was a 404 for. We do this, because
wagtail does not allow us to (currently) specify 404 pages
using the site admin UI, so we need to rely on the Django
methodology for handling 404 responses.
It would be great if we could pull the "which domain uses
which 404 template" information from the wagtail "sites"
configuration, but there is no way to know which template
belongs to which site, as "a site" is not tied to "a django
app" in the wagtail way of things.
"""
if request.site.hostname == 'mozillafestival.org':
html = render(request, 'mozfest/404.html')
return HttpResponseNotFound(html.content)
else:
html = render(request, '404.html')
return HttpResponseNotFound(html)
|
745 | def probabilistic_least_squares(design_matrix, y, regularization_matrix=None):
# Solve least-squares problem on the form
# design_matrix * coef = y
unscaled_posterior_covariance, pseudoInv, degrees_of_freedom = \
get_data_independent_estimation_quantities(design_matrix, regularization_matrix)
coef_posterior_mean = np.einsum('...ij, ...j->...i', pseudoInv, y)
residuals = y - np.einsum('...ij, ...j->...i', design_matrix, coef_posterior_mean)
residual_variance = (np.sum(residuals ** 2, axis=-1) / degrees_of_freedom)
if y.ndim == 1:
uncertainty_params = probabilistic_ls_quantities(residual_variance,
degrees_of_freedom,
unscaled_posterior_covariance)
else:
uncertainty_params = np.empty(y.shape[0], dtype=object)
for i in range(y.shape[0]):
if design_matrix.ndim == 2:
# Ordinary least-squares: identical design matrix for all voxels
uncertainty_params[i] = probabilistic_ls_quantities(residual_variance[i],
degrees_of_freedom,
unscaled_posterior_covariance)
else:
uncertainty_params[i] = probabilistic_ls_quantities(residual_variance[i],
degrees_of_freedom[i],
unscaled_posterior_covariance[i, ...])
return coef_posterior_mean, uncertainty_params
| def probabilistic_least_squares(design_matrix, y, regularization_matrix=None):
# Solve least-squares problem on the form
# design_matrix * coef = y
unscaled_posterior_covariance, pseudo_inv, degrees_of_freedom = \
get_data_independent_estimation_quantities(design_matrix, regularization_matrix)
coef_posterior_mean = np.einsum('...ij, ...j->...i', pseudoInv, y)
residuals = y - np.einsum('...ij, ...j->...i', design_matrix, coef_posterior_mean)
residual_variance = (np.sum(residuals ** 2, axis=-1) / degrees_of_freedom)
if y.ndim == 1:
uncertainty_params = probabilistic_ls_quantities(residual_variance,
degrees_of_freedom,
unscaled_posterior_covariance)
else:
uncertainty_params = np.empty(y.shape[0], dtype=object)
for i in range(y.shape[0]):
if design_matrix.ndim == 2:
# Ordinary least-squares: identical design matrix for all voxels
uncertainty_params[i] = probabilistic_ls_quantities(residual_variance[i],
degrees_of_freedom,
unscaled_posterior_covariance)
else:
uncertainty_params[i] = probabilistic_ls_quantities(residual_variance[i],
degrees_of_freedom[i],
unscaled_posterior_covariance[i, ...])
return coef_posterior_mean, uncertainty_params
|
50,239 | def send_password_reset_notification(redirect_url, user, manager, staff=False):
"""Trigger sending a password reset notification for the given customer/staff."""
token = default_token_generator.make_token(user)
params = urlencode({"email": user.email, "token": token})
reset_url = prepare_url(params, redirect_url)
user_payload = {
"user": get_default_user_payload(user),
"recipient_email": user.email,
"token": token,
"reset_url": reset_url,
**get_site_context(),
}
if staff:
manager.notify(
NotifyEventType.ACCOUNT_STAFF_RESET_PASSWORD, payload=user_payload
)
else:
manager.notify(NotifyEventType.ACCOUNT_PASSWORD_RESET, payload=user_payload)
| def send_password_reset_notification(redirect_url, user, manager, staff=False):
"""Trigger sending a password reset notification for the given customer/staff."""
token = default_token_generator.make_token(user)
params = urlencode({"email": user.email, "token": token})
reset_url = prepare_url(params, redirect_url)
user_payload = {
"user": get_default_user_payload(user),
"recipient_email": user.email,
"token": token,
"reset_url": reset_url,
**get_site_context(),
}
event = (
NotifyEventType.ACCOUNT_STAFF_RESET_PASSWORD if staff else
NotifyEventType.ACCOUNT_PASSWORD_RESET
)
manager.notify(event, payload=user_payload)
|
33,604 | def test_nested_functions(ray_start_regular):
# Make sure that remote functions can use other values that are defined
# after the remote function but before the first function invocation.
@ray.remote
def f():
return g(), ray.get(h.remote())
def g():
return 1
@ray.remote
def h():
return 2
assert ray.get(f.remote()) == (1, 2)
# Test a recursive remote function.
@ray.remote
def factorial(n):
if n == 0:
return 1
return n * ray.get(factorial.remote(n - 1))
assert ray.get(factorial.remote(0)) == 1
assert ray.get(factorial.remote(1)) == 1
assert ray.get(factorial.remote(2)) == 2
assert ray.get(factorial.remote(3)) == 6
assert ray.get(factorial.remote(4)) == 24
assert ray.get(factorial.remote(5)) == 120
# Test remote functions that both call each other.
@ray.remote
def factorial_even(n):
assert n % 2 == 0
if n == 0:
return 1
return n * ray.get(factorial_odd.remote(n - 1))
@ray.remote
def factorial_odd(n):
assert n % 2 == 1
return n * ray.get(factorial_even.remote(n - 1))
assert ray.get(factorial_even.remote(4)) == 24
assert ray.get(factorial_odd.remote(5)) == 120
| def test_nested_functions(ray_start_regular):
# Make sure that remote functions can use other values that are defined
# after the remote function but before the first function invocation.
@ray.remote
def f():
return g(), ray.get(h.remote())
def g():
return 1
@ray.remote
def h():
return 2
assert ray.get(f.remote()) == (1, 2)
# Test a remote function that recursively calls itself.
@ray.remote
def factorial(n):
if n == 0:
return 1
return n * ray.get(factorial.remote(n - 1))
assert ray.get(factorial.remote(0)) == 1
assert ray.get(factorial.remote(1)) == 1
assert ray.get(factorial.remote(2)) == 2
assert ray.get(factorial.remote(3)) == 6
assert ray.get(factorial.remote(4)) == 24
assert ray.get(factorial.remote(5)) == 120
# Test remote functions that both call each other.
@ray.remote
def factorial_even(n):
assert n % 2 == 0
if n == 0:
return 1
return n * ray.get(factorial_odd.remote(n - 1))
@ray.remote
def factorial_odd(n):
assert n % 2 == 1
return n * ray.get(factorial_even.remote(n - 1))
assert ray.get(factorial_even.remote(4)) == 24
assert ray.get(factorial_odd.remote(5)) == 120
|
14,762 | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up certificate expiry sensor."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
return True
| def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up certificate expiry sensor."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=dict(config)
)
)
return True
|
42,791 | def test_str_cat():
"Test Pandas string cat method"
df = pd.DataFrame({"text": ["a", "b", "c", "d"], "numbers": range(1, 5)})
result = process_text(
df=df,
column="text",
string_function="cat",
others=["A", "B", "C", "D"],
)
expected = pd.DataFrame(
{"text": ["aA", "bB", "cC", "dD"], "numbers": [1, 2, 3, 4]}
)
assert_frame_equal(result, expected)
| def test_str_cat():
"Test wrapper for Pandas ``.str.cat()`` method"
df = pd.DataFrame({"text": ["a", "b", "c", "d"], "numbers": range(1, 5)})
result = process_text(
df=df,
column="text",
string_function="cat",
others=["A", "B", "C", "D"],
)
expected = pd.DataFrame(
{"text": ["aA", "bB", "cC", "dD"], "numbers": [1, 2, 3, 4]}
)
assert_frame_equal(result, expected)
|
44,013 | def nuclear_energy(charges, r):
r"""Return a function that computes the nuclear-repulsion energy.
The nuclear-repulsion energy is computed as
.. math::
\sum_{i>j}^n \frac{q_i q_j}{r_{ij}},
where :math:`q`, :math:`r` and :math:`n` denote the nuclear charges (atomic numbers), nuclear
positions and the number of nuclei, respectively.
Args:
charges (list[int]): nuclear charges in atomic units
r (array[float]): nuclear positions
Returns:
function: function that computes the nuclear-repulsion energy
**Example**
>>> symbols = ['H', 'F']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]], requires_grad = True)
>>> mol = qml.hf. Molecule(symbols, geometry)
>>> args = [mol.coordinates]
>>> e = nuclear_energy(mol.nuclear_charges, mol.coordinates)(*args)
>>> print(e)
4.5
"""
def nuclear(*args):
r"""Compute the nuclear-repulsion energy.
Args:
args (array[array[float]]): initial values of the differentiable parameters
Returns:
array[float]: nuclear-repulsion energy
"""
if r.requires_grad:
coor = args[0]
else:
coor = r
e = 0
for i, r1 in enumerate(coor):
for j, r2 in enumerate(coor[i + 1 :]):
e = e + (charges[i] * charges[i + j + 1] / anp.linalg.norm(r1 - r2))
return e
return nuclear
| def nuclear_energy(charges, r):
r"""Return a function that computes the nuclear-repulsion energy.
The nuclear-repulsion energy is computed as
.. math::
\sum_{i>j}^n \frac{q_i q_j}{r_{ij}},
where :math:`q`, :math:`r` and :math:`n` denote the nuclear charges (atomic numbers), nuclear
positions and the number of nuclei, respectively.
Args:
charges (list[int]): nuclear charges in atomic units
r (array[float]): nuclear positions
Returns:
function: function that computes the nuclear-repulsion energy
**Example**
>>> symbols = ['H', 'F']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]], requires_grad = True)
>>> mol = qml.hf.Molecule(symbols, geometry)
>>> args = [mol.coordinates]
>>> e = nuclear_energy(mol.nuclear_charges, mol.coordinates)(*args)
>>> print(e)
4.5
"""
def nuclear(*args):
r"""Compute the nuclear-repulsion energy.
Args:
args (array[array[float]]): initial values of the differentiable parameters
Returns:
array[float]: nuclear-repulsion energy
"""
if r.requires_grad:
coor = args[0]
else:
coor = r
e = 0
for i, r1 in enumerate(coor):
for j, r2 in enumerate(coor[i + 1 :]):
e = e + (charges[i] * charges[i + j + 1] / anp.linalg.norm(r1 - r2))
return e
return nuclear
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.