id (int64, 11-59.9k) | original (string, 33-150k chars) | modified (string, 37-150k chars) |
---|---|---|
28,093 |
def journal_entry(cmdr, is_beta, system, station, entry, state):
# Always update, even if we're not the *current* system or station provider.
this.system_address = entry.get('SystemAddress') or this.system_address
this.system = entry.get('StarSystem') or this.system
# We need pop == 0 to set the value so as to clear 'x' in systems with
# no stations.
pop = entry.get('Population')
if pop is not None:
this.system_population = pop
this.station = entry.get('StationName') or this.station
this.station_marketid = entry.get('MarketID') or this.station_marketid
# We might pick up StationName in DockingRequested, make sure we clear it if leaving
if entry['event'] in ('Undocked', 'FSDJump', 'SupercruiseEntry'):
this.station = None
this.station_marketid = None
if config.get('station_provider') == 'EDSM':
this.station_link['text'] = this.station or (this.system_population and this.system_population > 0 and STATION_UNDOCKED or '')
this.station_link['url'] = station_url(this.system, this.station)
this.station_link.update_idletasks()
# Update display of 'EDSM Status' image
if this.system_link['text'] != system:
this.system_link['text'] = system or ''
this.system_link['image'] = ''
this.system_link.update_idletasks()
this.multicrew = bool(state['Role'])
if 'StarPos' in entry:
this.coordinates = entry['StarPos']
elif entry['event'] == 'LoadGame':
this.coordinates = None
if entry['event'] in ['LoadGame', 'Commander', 'NewCommander']:
this.newgame = True
this.newgame_docked = False
this.navbeaconscan = 0
elif entry['event'] == 'StartUp':
this.newgame = False
this.newgame_docked = False
this.navbeaconscan = 0
elif entry['event'] == 'Location':
this.newgame = True
this.newgame_docked = entry.get('Docked', False)
this.navbeaconscan = 0
elif entry['event'] == 'NavBeaconScan':
this.navbeaconscan = entry['NumBodies']
# Send interesting events to EDSM
if config.getint('edsm_out') and not is_beta and not this.multicrew and credentials(cmdr) and entry['event'] not in this.discardedEvents:
# Introduce transient states into the event
transient = {
'_systemName': system,
'_systemCoordinates': this.coordinates,
'_stationName': station,
'_shipId': state['ShipID'],
}
entry.update(transient)
if entry['event'] == 'LoadGame':
# Synthesise Materials events on LoadGame since we will have missed it
materials = {
'timestamp': entry['timestamp'],
'event': 'Materials',
'Raw': [ { 'Name': k, 'Count': v } for k,v in state['Raw'].items() ],
'Manufactured': [ { 'Name': k, 'Count': v } for k,v in state['Manufactured'].items() ],
'Encoded': [ { 'Name': k, 'Count': v } for k,v in state['Encoded'].items() ],
}
materials.update(transient)
this.queue.put((cmdr, materials))
this.queue.put((cmdr, entry))
|
def journal_entry(cmdr, is_beta, system, station, entry, state):
# Always update, even if we're not the *current* system or station provider.
this.system_address = entry.get('SystemAddress') or this.system_address
this.system = entry.get('StarSystem') or this.system
this.system_address = entry.get('SystemAddress', this.system_address)
# We need pop == 0 to set the value so as to clear 'x' in systems with
# no stations.
pop = entry.get('Population')
if pop is not None:
this.system_population = pop
this.station = entry.get('StationName') or this.station
this.station_marketid = entry.get('MarketID') or this.station_marketid
# We might pick up StationName in DockingRequested, make sure we clear it if leaving
if entry['event'] in ('Undocked', 'FSDJump', 'SupercruiseEntry'):
this.station = None
this.station_marketid = None
if config.get('station_provider') == 'EDSM':
this.station_link['text'] = this.station or (this.system_population and this.system_population > 0 and STATION_UNDOCKED or '')
this.station_link['url'] = station_url(this.system, this.station)
this.station_link.update_idletasks()
# Update display of 'EDSM Status' image
if this.system_link['text'] != system:
this.system_link['text'] = system or ''
this.system_link['image'] = ''
this.system_link.update_idletasks()
this.multicrew = bool(state['Role'])
if 'StarPos' in entry:
this.coordinates = entry['StarPos']
elif entry['event'] == 'LoadGame':
this.coordinates = None
if entry['event'] in ['LoadGame', 'Commander', 'NewCommander']:
this.newgame = True
this.newgame_docked = False
this.navbeaconscan = 0
elif entry['event'] == 'StartUp':
this.newgame = False
this.newgame_docked = False
this.navbeaconscan = 0
elif entry['event'] == 'Location':
this.newgame = True
this.newgame_docked = entry.get('Docked', False)
this.navbeaconscan = 0
elif entry['event'] == 'NavBeaconScan':
this.navbeaconscan = entry['NumBodies']
# Send interesting events to EDSM
if config.getint('edsm_out') and not is_beta and not this.multicrew and credentials(cmdr) and entry['event'] not in this.discardedEvents:
# Introduce transient states into the event
transient = {
'_systemName': system,
'_systemCoordinates': this.coordinates,
'_stationName': station,
'_shipId': state['ShipID'],
}
entry.update(transient)
if entry['event'] == 'LoadGame':
# Synthesise Materials events on LoadGame since we will have missed it
materials = {
'timestamp': entry['timestamp'],
'event': 'Materials',
'Raw': [ { 'Name': k, 'Count': v } for k,v in state['Raw'].items() ],
'Manufactured': [ { 'Name': k, 'Count': v } for k,v in state['Manufactured'].items() ],
'Encoded': [ { 'Name': k, 'Count': v } for k,v in state['Encoded'].items() ],
}
materials.update(transient)
this.queue.put((cmdr, materials))
this.queue.put((cmdr, entry))
|
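A side note on the pattern this row changes: `entry.get('SystemAddress') or this.system_address` falls back whenever the new value is falsy (including a legitimate 0, the same concern the `pop == 0` comment raises), whereas `entry.get('SystemAddress', this.system_address)` only falls back when the key is absent. A minimal standalone sketch with plain dicts (no EDMC objects involved):

```python
# Illustrative only: why `.get(k) or default` and `.get(k, default)` differ.
entry = {'Population': 0}   # a journal event carrying a legitimately falsy value
cached = 42                 # previously cached value

# `or` treats 0 as "missing" and keeps the stale cache:
print(entry.get('Population') or cached)      # 42  -> the 0 is silently dropped

# An explicit default is used only when the key is absent:
print(entry.get('Population', cached))        # 0   -> the new value is kept
print(entry.get('StarSystem', 'fallback'))    # 'fallback' -> key missing, default applies
```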
22,652 |
def _generate_rst(gallery_conf, fname, content):
"""
Helper function returning the rst text for a given example content.
This writes a file gallery_conf['examples_dir']/fname with *content*,
creates the corresponding rst file by running generate_file_rst() and
returns the generated rest code.
Parameters
----------
gallery_conf
A gallery_conf as created by the gallery_conf fixture.
fname : str
A filename; e.g. 'test.py'. This is relative to
gallery_conf['examples_dir']
content : str
The content of fname.
Returns
-------
rst : str
The generated rst code.
"""
with codecs.open(os.path.join(gallery_conf['examples_dir'], fname),
mode='w', encoding='utf-8') as f:
f.write('\n'.join(content))
# generate rst file
sg.generate_file_rst(fname, gallery_conf['gallery_dir'],
gallery_conf['examples_dir'], gallery_conf)
# read rst file and check if it contains code output
rst_fname = os.path.splitext(fname)[0] + '.rst'
with codecs.open(os.path.join(gallery_conf['gallery_dir'], rst_fname),
mode='r', encoding='utf-8') as f:
rst = f.read()
return rst
|
def _generate_rst(gallery_conf, fname, content):
"""
Helper function returning the rst text for a given example content.
This writes a file gallery_conf['examples_dir']/fname with *content*,
creates the corresponding rst file by running generate_file_rst() and
returns the generated rST code.
Parameters
----------
gallery_conf
A gallery_conf as created by the gallery_conf fixture.
fname : str
A filename; e.g. 'test.py'. This is relative to
gallery_conf['examples_dir']
content : str
The content of fname.
Returns
-------
rst : str
The generated rst code.
"""
with codecs.open(os.path.join(gallery_conf['examples_dir'], fname),
mode='w', encoding='utf-8') as f:
f.write('\n'.join(content))
# generate rst file
sg.generate_file_rst(fname, gallery_conf['gallery_dir'],
gallery_conf['examples_dir'], gallery_conf)
# read rst file and check if it contains code output
rst_fname = os.path.splitext(fname)[0] + '.rst'
with codecs.open(os.path.join(gallery_conf['gallery_dir'], rst_fname),
mode='r', encoding='utf-8') as f:
rst = f.read()
return rst
|
57,746 |
def http_request(method, url_suffix, params=None, data=None, files=None, headers=HEADERS, safe=False,
get_token_flag=True, no_json=False, json=None, status_code=None):
"""
A wrapper for requests lib to send our requests and handle requests and responses better.
:param json: JSON body
:type json: ``dict`` or ``list``
:type method: ``str``
:param method: HTTP method for the request.
:type url_suffix: ``str``
:param url_suffix: The suffix of the URL (endpoint)
:type params: ``dict``
:param params: The URL params to be passed.
:type data: ``str``
:param data: The body data of the request.
:type headers: ``dict``
:param headers: Request headers
:type safe: ``bool``
:param safe: If set to true will return None in case of http error
:type get_token_flag: ``bool``
:param get_token_flag: If set to True will call get_token()
:type no_json: ``bool``
:param no_json: If set to true will not parse the content and will return the raw response object for successful
response
:type status_code: ``int``
:param status_code: The request codes to accept as OK.
:return: Returns the http request response json
:rtype: ``dict``
"""
if get_token_flag:
token = get_token()
headers['Authorization'] = 'Bearer {}'.format(token)
url = SERVER + url_suffix
try:
res = requests.request(
method,
url,
verify=USE_SSL,
params=params,
data=data,
headers=headers,
files=files,
json=json
)
except requests.exceptions.RequestException:
return_error('Error in connection to the server. Please make sure you entered the URL correctly.')
try:
valid_status_codes = {200, 201, 202, 204}
# Handling a case when we want to return an entry for 404 status code.
if status_code:
valid_status_codes.add(status_code)
if res.status_code not in valid_status_codes:
res_json = res.json()
reason = res.reason
resources = res_json.get('resources', {})
if resources:
for host_id, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message')
reason += f'\nHost ID {host_id} - {error_message}'
elif res_json.get('errors'):
errors = res_json.get('errors', [])
for error in errors:
reason += f"\n{error.get('message')}"
err_msg = 'Error in API call to CrowdStrike Falcon: code: {code} - reason: {reason}'.format(
code=res.status_code,
reason=reason
)
# try to create a new token
if res.status_code == 403 and get_token_flag:
LOG(err_msg)
token = get_token(new_token=True)
headers['Authorization'] = 'Bearer {}'.format(token)
return http_request(method, url_suffix, params, data, headers, safe, get_token_flag=False)
elif safe:
return None
return_error(err_msg)
return res if no_json else res.json()
except ValueError as exception:
raise ValueError(
f'Failed to parse json object from response: {exception} - {res.content}') # type: ignore[str-bytes-safe]
|
def http_request(method, url_suffix, params=None, data=None, files=None, headers=HEADERS, safe=False,
get_token_flag=True, no_json=False, json=None, status_code=None):
"""
A wrapper for requests lib to send our requests and handle requests and responses better.
:param json: JSON body
:type json: ``dict`` or ``list``
:type method: ``str``
:param method: HTTP method for the request.
:type url_suffix: ``str``
:param url_suffix: The suffix of the URL (endpoint)
:type params: ``dict``
:param params: The URL params to be passed.
:type data: ``str``
:param data: The body data of the request.
:type headers: ``dict``
:param headers: Request headers
:type safe: ``bool``
:param safe: If set to true will return None in case of http error
:type get_token_flag: ``bool``
:param get_token_flag: If set to True will call get_token()
:type no_json: ``bool``
:param no_json: If set to true will not parse the content and will return the raw response object for successful
response
:type status_code: ``int``
:param status_code: The request codes to accept as OK.
:return: Returns the http request response json
:rtype: ``dict``
"""
if get_token_flag:
token = get_token()
headers['Authorization'] = 'Bearer {}'.format(token)
url = SERVER + url_suffix
try:
res = requests.request(
method,
url,
verify=USE_SSL,
params=params,
data=data,
headers=headers,
files=files,
json=json
)
except requests.exceptions.RequestException:
return_error('Error in connection to the server. Please make sure you entered the URL correctly.')
try:
valid_status_codes = {200, 201, 202, 204}
# Handling a case when we want to return an entry for 404 status code.
if status_code:
valid_status_codes.add(status_code)
if res.status_code not in valid_status_codes:
res_json = res.json()
reason = res.reason
resources = res_json.get('resources', {})
if resources:
for host_id, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message')
reason += f'\nHost ID {host_id} - {error_message}'
elif res_json.get('errors'):
errors = res_json.get('errors', [])
for error in errors:
reason += f"\n{error.get('message')}"
err_msg = 'Error in API call to CrowdStrike Falcon: code: {code} - reason: {reason}'.format(
code=res.status_code,
reason=reason
)
# try to create a new token
if res.status_code == 403 and get_token_flag:
LOG(err_msg)
token = get_token(new_token=True)
headers['Authorization'] = 'Bearer {}'.format(token)
return http_request(method, url_suffix, params, data, headers, safe, get_token_flag=False)
elif safe:
return None
return_error(err_msg)
return res if no_json else res.json()
except ValueError as exception:
raise ValueError(
f'Failed to parse json object from response: {exception} - {res.content}') # type: ignore[str-bytes-safe]
|
38,429 |
def map_subgrid_to_grid(
g: pp.Grid,
loc_faces: np.ndarray,
loc_cells: np.ndarray,
is_vector: bool,
nd: Optional[int] = None,
) -> Tuple[np.ndarray, np.ndarray]:
""" Obtain mappings from the cells and faces of a subgrid back to a larger grid.
Parameters:
g (pp.Grid): The larger grid.
loc_faces (np.ndarray): For each face in the subgrid, the index of the
corresponding face in the larger grid.
loc_cells (np.ndarray): For each cell in the subgrid, the index of the
corresponding cell in the larger grid.
is_vector (bool): If True, the returned mappings are sized to fit with vector
variables, with g.dim elements per cell and face.
nd (int, optional): Dimension. Defaults to g.dim.
Returns:
sps.csr_matrix, size (g.num_faces, loc_faces.size): Mapping from local to
global faces. If is_vector is True, the size is multiplied with g.dim.
sps.csr_matrix, size (loc_cells.size, g.num_cells): Mapping from global to
local cells. If is_vector is True, the size is multiplied with g.dim.
"""
if nd is None:
nd = g.dim
num_faces_loc = loc_faces.size
num_cells_loc = loc_cells.size
if is_vector:
face_map = sps.csr_matrix(
(
np.ones(num_faces_loc * nd),
(expand_indices_nd(loc_faces, nd), np.arange(num_faces_loc * nd)),
),
shape=(g.num_faces * nd, num_faces_loc * nd),
)
cell_map = sps.csr_matrix(
(
np.ones(num_cells_loc * nd),
(np.arange(num_cells_loc * nd), expand_indices_nd(loc_cells, nd)),
),
shape=(num_cells_loc * nd, g.num_cells * nd),
)
else:
face_map = sps.csr_matrix(
(np.ones(num_faces_loc), (loc_faces, np.arange(num_faces_loc))),
shape=(g.num_faces, num_faces_loc),
)
cell_map = sps.csr_matrix(
(np.ones(num_cells_loc), (np.arange(num_cells_loc), loc_cells)),
shape=(num_cells_loc, g.num_cells),
)
return face_map, cell_map
|
def map_subgrid_to_grid(
g: pp.Grid,
loc_faces: np.ndarray,
loc_cells: np.ndarray,
is_vector: bool,
nd: Optional[int] = None,
) -> Tuple[np.ndarray, np.ndarray]:
""" Obtain mappings from the cells and faces of a subgrid back to a larger grid.
Parameters:
g (pp.Grid): The larger grid.
loc_faces (np.ndarray): For each face in the subgrid, the index of the
corresponding face in the larger grid.
loc_cells (np.ndarray): For each cell in the subgrid, the index of the
corresponding cell in the larger grid.
is_vector (bool): If True, the returned mappings are sized to fit with vector
variables, with nd elements per cell and face.
nd (int, optional): Dimension. Defaults to g.dim.
Returns:
sps.csr_matrix, size (g.num_faces, loc_faces.size): Mapping from local to
global faces. If is_vector is True, the size is multiplied with g.dim.
sps.csr_matrix, size (loc_cells.size, g.num_cells): Mapping from global to
local cells. If is_vector is True, the size is multiplied with g.dim.
"""
if nd is None:
nd = g.dim
num_faces_loc = loc_faces.size
num_cells_loc = loc_cells.size
if is_vector:
face_map = sps.csr_matrix(
(
np.ones(num_faces_loc * nd),
(expand_indices_nd(loc_faces, nd), np.arange(num_faces_loc * nd)),
),
shape=(g.num_faces * nd, num_faces_loc * nd),
)
cell_map = sps.csr_matrix(
(
np.ones(num_cells_loc * nd),
(np.arange(num_cells_loc * nd), expand_indices_nd(loc_cells, nd)),
),
shape=(num_cells_loc * nd, g.num_cells * nd),
)
else:
face_map = sps.csr_matrix(
(np.ones(num_faces_loc), (loc_faces, np.arange(num_faces_loc))),
shape=(g.num_faces, num_faces_loc),
)
cell_map = sps.csr_matrix(
(np.ones(num_cells_loc), (np.arange(num_cells_loc), loc_cells)),
shape=(num_cells_loc, g.num_cells),
)
return face_map, cell_map
|
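For intuition about the mapping matrices built above, here is a small self-contained sketch of the scalar (`is_vector=False`) branch using plain NumPy/SciPy in place of a `pp.Grid`; the grid size and local indices are made up for illustration:

```python
import numpy as np
import scipy.sparse as sps

num_faces_global = 5              # stand-in for g.num_faces
loc_faces = np.array([1, 3])      # subgrid faces, given as global face indices
num_faces_loc = loc_faces.size

# Same construction as the scalar branch of map_subgrid_to_grid:
face_map = sps.csr_matrix(
    (np.ones(num_faces_loc), (loc_faces, np.arange(num_faces_loc))),
    shape=(num_faces_global, num_faces_loc),
)

# Multiplying by face_map scatters local face values into the global numbering.
local_values = np.array([10.0, 20.0])
print(face_map @ local_values)    # [ 0. 10.  0. 20.  0.]
```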
24,646 |
def get_file(basename, base_url=_BASE_URL):
r"""
Downloads a file from a URL (if the file does not already exist) and
returns the full local path to the file.
Parameters
----------
basename : str
Name of the file to be downloaded (extension included).
base_url : str, optional
The base URL of the file to be downloaded. Defaults to the main
directory of the PlasmaPy data repository.
Returns
-------
path : str
The full local path to the downloaded file.
"""
if not "." in str(basename):
raise ValueError(f"'filename' ({basename}) must include an extension.")
path = os.path.join(_DOWNLOADS_PATH, basename)
# If file doesn't exist, download it
if not os.path.exists(path):
url = urljoin(base_url, basename)
# Get the requested content
r = requests.get(url)
# Validate that the content type matches one of the content types
# the module knows how to download.
#
# Missing files on GitHub will resolve to a 404 html page, so we use
# this as an indicator that the file may not exist.
allowed_types = ["text/plain; charset=utf-8", "image/png"]
if not r.headers["Content-Type"] in allowed_types:
raise OSError(
f"The requested file is not an allowed"
f"Content-Type: {r.headers['Content-Type']}."
"This may indicate that the file does not exist at "
"the URL provided."
)
# Write the content to disk
with open(path, "wb") as f:
f.write(r.content)
return path
|
def get_file(basename, base_url=_BASE_URL):
r"""
Downloads a file from a URL (if the file does not already exist) and
returns the full local path to the file.
Parameters
----------
basename : str
Name of the file to be downloaded (extension included).
base_url : str, optional
The base URL of the file to be downloaded. Defaults to the main
directory of the PlasmaPy data repository.
Returns
-------
path : str
The full local path to the downloaded file.
"""
if not "." in str(basename):
raise ValueError(f"'filename' ({basename}) must include an extension.")
path = os.path.join(_DOWNLOADS_PATH, basename)
# If file doesn't exist locally, download it
if not os.path.exists(path):
url = urljoin(base_url, basename)
# Get the requested content
r = requests.get(url)
# Validate that the content type matches one of the content types
# the module knows how to download.
#
# Missing files on GitHub will resolve to a 404 html page, so we use
# this as an indicator that the file may not exist.
allowed_types = ["text/plain; charset=utf-8", "image/png"]
if not r.headers["Content-Type"] in allowed_types:
raise OSError(
f"The requested file is not an allowed"
f"Content-Type: {r.headers['Content-Type']}."
"This may indicate that the file does not exist at "
"the URL provided."
)
# Write the content to disk
with open(path, "wb") as f:
f.write(r.content)
return path
|
26,717 |
def get_all_backportable_providers() -> List[str]:
"""
Returns all providers that should be taken into account when preparing backports.
For now remove cncf.kubernetes as it has no chances to work with current core of Airflow 2.0
:return: list of providers that are considered for backport packages
"""
# TODO: Maybe we should fix it and release cnncf.kubernetes separately
excluded_providers = ["cncf.kubernetes"]
return [prov for prov in PROVIDERS_REQUIREMENTS.keys() if prov not in excluded_providers]
|
def get_all_backportable_providers() -> List[str]:
"""
Returns all providers that should be taken into account when preparing backports.
For now remove cncf.kubernetes as it has no chances to work with current core of Airflow 2.0
:return: list of providers that are considered for backport packages
"""
# TODO: Maybe we should fix it and release cncf.kubernetes separately
excluded_providers = ["cncf.kubernetes"]
return [prov for prov in PROVIDERS_REQUIREMENTS.keys() if prov not in excluded_providers]
|
1,648 |
def test_one_hot_encoder_drop_equals_if_binary():
X = [['Male', 1], ['Female', 3], ['Female', 2]]
expected = np.array([[1., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 1., 0.]])
ohe = OneHotEncoder(drop='if_binary')
ohe.fit(X)
result = ohe.transform(X).toarray()
assert_array_equal(expected, result)
|
def test_one_hot_encoder_drop_equals_if_binary():
X = [['Male', 1], ['Female', 3], ['Female', 2]]
expected = np.array([[1., 1., 0., 0.], [0., 0., 0., 1.], [0., 0., 1., 0.]])
ohe = OneHotEncoder(drop='if_binary', sparse=False)
ohe.fit(X)
result = ohe.transform(X).toarray()
assert_array_equal(expected, result)
|
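For context on what the expected matrix encodes: with `drop='if_binary'` the two-category first feature keeps a single indicator (its first category, 'Female', is dropped), while the three-category second feature keeps all of its indicators, giving the four output columns above. A small sketch spelling that layout out by hand (assumed column order `[x0_Male, x1_1, x1_2, x1_3]`, no scikit-learn required):

```python
import numpy as np

# Assumed column layout of `expected`: [x0_Male, x1_1, x1_2, x1_3]
expected = np.array([[1., 1., 0., 0.],
                     [0., 0., 0., 1.],
                     [0., 0., 1., 0.]])
X = [['Male', 1], ['Female', 3], ['Female', 2]]

for row, enc in zip(X, expected):
    print(row, '->', enc)
# ['Male', 1]   -> [1. 1. 0. 0.]   (Male indicator on, category 1 on)
# ['Female', 3] -> [0. 0. 0. 1.]
# ['Female', 2] -> [0. 0. 1. 0.]
```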
8,432 |
def _centroid_single_region(spectrum, region=None):
"""
Calculate the centroid of the spectrum based on the flux and uncertainty
in the spectrum.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
The spectrum object over which the centroid will be calculated.
region: `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the centroid.
Returns
-------
centroid : float or list (based on region input)
Centroid of the spectrum or within the regions
Notes
-----
This is a helper function for the above `centroid()` method.
"""
if region is not None:
calc_spectrum = extract_region(spectrum, region)
else:
calc_spectrum = spectrum
flux = calc_spectrum.flux
dispersion = (calc_spectrum.spectral_axis).quantity
if len(flux.shape) > 1:
dispersion = (np.tile(dispersion, [flux.shape[0], 1]))
# the axis=-1 will enable this to run on single-dispersion, single-flux
# and single-dispersion, multiple-flux
return np.sum(flux * dispersion, axis=-1) / np.sum(flux, axis=-1)
|
def _centroid_single_region(spectrum, region=None):
"""
Calculate the centroid of the spectrum based on the flux and uncertainty
in the spectrum.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
The spectrum object over which the centroid will be calculated.
region: `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the centroid.
Returns
-------
centroid : float or list (based on region input)
Centroid of the spectrum or within the regions
Notes
-----
This is a helper function for the above `centroid()` method.
"""
if region is not None:
calc_spectrum = extract_region(spectrum, region)
else:
calc_spectrum = spectrum
flux = calc_spectrum.flux
dispersion = (calc_spectrum.spectral_axis).quantity
if len(flux.shape) > 1:
dispersion = np.tile(dispersion, [flux.shape[0], 1])
# the axis=-1 will enable this to run on single-dispersion, single-flux
# and single-dispersion, multiple-flux
return np.sum(flux * dispersion, axis=-1) / np.sum(flux, axis=-1)
|
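The return value above is a flux-weighted mean of the spectral axis; a quick worked example with plain NumPy arrays (made-up numbers, no specutils objects):

```python
import numpy as np

flux = np.array([1.0, 1.0, 4.0])                  # arbitrary flux samples
dispersion = np.array([5000.0, 5010.0, 5020.0])   # spectral axis values

centroid = np.sum(flux * dispersion, axis=-1) / np.sum(flux, axis=-1)
print(centroid)   # 5015.0 -- pulled toward the brightest (last) sample
```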
29,346 |
def main(args: Optional[Sequence[str]]=None) -> None:
"""Starts up a development server running Oppia."""
parsed_args = _PARSER.parse_args(args=args)
if common.is_port_in_use(PORT_NUMBER_FOR_GAE_SERVER): # type: ignore[no-untyped-call]
common.print_each_string_after_two_new_lines([ # type: ignore[no-untyped-call]
'WARNING',
'Could not start new server. There is already an existing server '
'running at port %s.' % PORT_NUMBER_FOR_GAE_SERVER,
])
# NOTE: The ordering of alert_on_exit() is important because we want the
# alert to be printed _before_ the ExitStack unwinds, hence its placement as
# the "latter" context (context managers exit in reverse-order).
with contextlib.ExitStack() as stack, alert_on_exit():
# ExitStack unwinds in reverse-order, so this will be the final action.
stack.callback(notify_about_successful_shutdown)
stack.callback(call_extend_index_yaml)
build_args = []
if parsed_args.prod_env:
build_args.append('--prod_env')
if parsed_args.maintenance_mode:
build_args.append('--maintenance_mode')
if parsed_args.source_maps:
build_args.append('--source_maps')
build.main(args=build_args) # type: ignore[no-untyped-call]
stack.callback(build.set_constants_to_default)
stack.enter_context(servers.managed_redis_server())
stack.enter_context(servers.managed_elasticsearch_dev_server())
if constants.EMULATOR_MODE:
stack.enter_context(servers.managed_firebase_auth_emulator(
recover_users=parsed_args.save_datastore))
stack.enter_context(servers.managed_cloud_datastore_emulator(
clear_datastore=not parsed_args.save_datastore))
# NOTE: When prod_env=True the Webpack compiler is run by build.main().
if not parsed_args.prod_env:
stack.enter_context(servers.managed_webpack_compiler(
use_prod_env=False, use_source_maps=parsed_args.source_maps,
watch_mode=True))
app_yaml_path = 'app.yaml' if parsed_args.prod_env else 'app_dev.yaml'
dev_appserver = stack.enter_context(servers.managed_dev_appserver(
app_yaml_path,
enable_host_checking=not parsed_args.disable_host_checking,
automatic_restart=not parsed_args.no_auto_restart,
skip_sdk_update_check=True,
port=PORT_NUMBER_FOR_GAE_SERVER))
managed_web_browser = (
None if parsed_args.no_browser else
servers.create_managed_web_browser(PORT_NUMBER_FOR_GAE_SERVER)) # type: ignore[no-untyped-call]
if managed_web_browser is None:
common.print_each_string_after_two_new_lines([ # type: ignore[no-untyped-call]
'INFORMATION',
'Local development server is ready! You can access it by '
'navigating to http://localhost:%s/ in a web '
'browser.' % PORT_NUMBER_FOR_GAE_SERVER,
])
else:
common.print_each_string_after_two_new_lines([ # type: ignore[no-untyped-call]
'INFORMATION',
'Local development server is ready! Opening a default web '
'browser window pointing to it: '
'http://localhost:%s/' % PORT_NUMBER_FOR_GAE_SERVER,
])
stack.enter_context(managed_web_browser)
dev_appserver.wait()
|
def main(args: Optional[Sequence[str]] = None) -> None:
"""Starts up a development server running Oppia."""
parsed_args = _PARSER.parse_args(args=args)
if common.is_port_in_use(PORT_NUMBER_FOR_GAE_SERVER): # type: ignore[no-untyped-call]
common.print_each_string_after_two_new_lines([ # type: ignore[no-untyped-call]
'WARNING',
'Could not start new server. There is already an existing server '
'running at port %s.' % PORT_NUMBER_FOR_GAE_SERVER,
])
# NOTE: The ordering of alert_on_exit() is important because we want the
# alert to be printed _before_ the ExitStack unwinds, hence its placement as
# the "latter" context (context managers exit in reverse-order).
with contextlib.ExitStack() as stack, alert_on_exit():
# ExitStack unwinds in reverse-order, so this will be the final action.
stack.callback(notify_about_successful_shutdown)
stack.callback(call_extend_index_yaml)
build_args = []
if parsed_args.prod_env:
build_args.append('--prod_env')
if parsed_args.maintenance_mode:
build_args.append('--maintenance_mode')
if parsed_args.source_maps:
build_args.append('--source_maps')
build.main(args=build_args) # type: ignore[no-untyped-call]
stack.callback(build.set_constants_to_default)
stack.enter_context(servers.managed_redis_server())
stack.enter_context(servers.managed_elasticsearch_dev_server())
if constants.EMULATOR_MODE:
stack.enter_context(servers.managed_firebase_auth_emulator(
recover_users=parsed_args.save_datastore))
stack.enter_context(servers.managed_cloud_datastore_emulator(
clear_datastore=not parsed_args.save_datastore))
# NOTE: When prod_env=True the Webpack compiler is run by build.main().
if not parsed_args.prod_env:
stack.enter_context(servers.managed_webpack_compiler(
use_prod_env=False, use_source_maps=parsed_args.source_maps,
watch_mode=True))
app_yaml_path = 'app.yaml' if parsed_args.prod_env else 'app_dev.yaml'
dev_appserver = stack.enter_context(servers.managed_dev_appserver(
app_yaml_path,
enable_host_checking=not parsed_args.disable_host_checking,
automatic_restart=not parsed_args.no_auto_restart,
skip_sdk_update_check=True,
port=PORT_NUMBER_FOR_GAE_SERVER))
managed_web_browser = (
None if parsed_args.no_browser else
servers.create_managed_web_browser(PORT_NUMBER_FOR_GAE_SERVER)) # type: ignore[no-untyped-call]
if managed_web_browser is None:
common.print_each_string_after_two_new_lines([ # type: ignore[no-untyped-call]
'INFORMATION',
'Local development server is ready! You can access it by '
'navigating to http://localhost:%s/ in a web '
'browser.' % PORT_NUMBER_FOR_GAE_SERVER,
])
else:
common.print_each_string_after_two_new_lines([ # type: ignore[no-untyped-call]
'INFORMATION',
'Local development server is ready! Opening a default web '
'browser window pointing to it: '
'http://localhost:%s/' % PORT_NUMBER_FOR_GAE_SERVER,
])
stack.enter_context(managed_web_browser)
dev_appserver.wait()
|
31,606 |
def get_computer_command(client: Client, computer_id: int, expand: List[str], overrides: bool) -> CommandResults:
"""
Obtain information about an existing computer inside Trend Micro.
Args:
client (client): The Trend Micro API client.
computer_id (int): The ID of the computer to get its information.
expand (List[str]): The desired information about the computers.
overrides (bool): Whether to get the overridden properties or not.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
response = client.get_computer(computer_id=computer_id, expand=expand, overrides=overrides)
markdown = tableToMarkdown(f"Details for the computer {response['hostName']}", response, removeNull=True,
headers=COMPUTER_TABLE_HEADERS, headerTransform=pascalToSpace)
return CommandResults(outputs_prefix="TrendMicro.Computers", outputs_key_field="ID", outputs=response,
readable_output=markdown, raw_response=response)
|
def get_computer_command(client: Client, computer_id: int, expand: List[str], overrides: bool) -> CommandResults:
"""
Obtain information about an existing computer inside Trend Micro.
Args:
client (client): The Trend Micro API client.
computer_id (int): The ID of the computer to get its information.
expand (List[str]): The desired information about the computers.
overrides (bool): Whether to get the overridden properties or not.
Returns:
CommandResults: Command results with raw response, outputs and readable outputs.
"""
response = client.get_computer(computer_id=computer_id, expand=expand, overrides=overrides)
markdown = tableToMarkdown(f"Details for the computer {response.get('hostName')}", response, removeNull=True,
headers=COMPUTER_TABLE_HEADERS, headerTransform=pascalToSpace)
return CommandResults(outputs_prefix="TrendMicro.Computers", outputs_key_field="ID", outputs=response,
readable_output=markdown, raw_response=response)
|
52,783 |
def _json_wrap_input(input, value, handle_files="skip"):
input_type = input.type
def _data_input_to_path(v):
path = _cast_if_not_none(v, str)
if path == "None":
path = None
return path
if input_type == "repeat":
repeat_job_value = []
for d in value:
repeat_instance_job_value = {}
json_wrap(input.inputs, d, repeat_instance_job_value)
repeat_job_value.append(repeat_instance_job_value)
json_value = repeat_job_value
elif input_type == "conditional":
values = value
current = values["__current_case__"]
conditional_job_value = {}
json_wrap(input.cases[current].inputs, values, conditional_job_value)
test_param = input.test_param
test_param_name = test_param.name
test_value = _json_wrap_input(test_param, values[test_param_name])
conditional_job_value[test_param_name] = test_value
json_value = conditional_job_value
elif input_type == "section":
values = value
section_job_value = {}
json_wrap(input.inputs, values, section_job_value)
json_value = section_job_value
elif input_type == "data" and input.multiple:
if handle_files == "paths":
json_value = list(map(_data_input_to_path, value))
elif handle_files == "skip":
return SKIP_INPUT
else:
raise NotImplementedError()
elif input_type == "data":
if handle_files == "paths":
json_value = _data_input_to_path(value)
elif handle_files == "skip":
return SKIP_INPUT
elif handle_files == "OBJECT":
if value:
if isinstance(value, list):
value = value[0]
return _hda_to_object(value)
else:
return None
raise NotImplementedError()
elif input_type == "data_collection":
if handle_files == "skip":
return SKIP_INPUT
raise NotImplementedError()
elif input_type in ["select", "text", "color", "hidden"]:
json_value = _cast_if_not_none(value, str)
elif input_type == "float":
json_value = _cast_if_not_none(value, float, empty_to_none=True)
elif input_type == "integer":
json_value = _cast_if_not_none(value, int, empty_to_none=True)
elif input_type == "boolean":
json_value = _cast_if_not_none(value, bool)
elif input_type == "data_column":
# value is a SelectToolParameterWrapper()
if input.multiple:
json_value = [int(_) for _ in _cast_if_not_none(value.value, list)]
else:
json_value = [_cast_if_not_none(value.value, int)]
else:
raise NotImplementedError("input_type [%s] not implemented" % input_type)
return json_value
|
def _json_wrap_input(input, value, handle_files="skip"):
input_type = input.type
def _data_input_to_path(v):
path = _cast_if_not_none(v, str)
if path == "None":
path = None
return path
if input_type == "repeat":
repeat_job_value = []
for d in value:
repeat_instance_job_value = {}
json_wrap(input.inputs, d, repeat_instance_job_value)
repeat_job_value.append(repeat_instance_job_value)
json_value = repeat_job_value
elif input_type == "conditional":
values = value
current = values["__current_case__"]
conditional_job_value = {}
json_wrap(input.cases[current].inputs, values, conditional_job_value)
test_param = input.test_param
test_param_name = test_param.name
test_value = _json_wrap_input(test_param, values[test_param_name])
conditional_job_value[test_param_name] = test_value
json_value = conditional_job_value
elif input_type == "section":
values = value
section_job_value = {}
json_wrap(input.inputs, values, section_job_value)
json_value = section_job_value
elif input_type == "data" and input.multiple:
if handle_files == "paths":
json_value = list(map(_data_input_to_path, value))
elif handle_files == "skip":
return SKIP_INPUT
else:
raise NotImplementedError()
elif input_type == "data":
if handle_files == "paths":
json_value = _data_input_to_path(value)
elif handle_files == "skip":
return SKIP_INPUT
elif handle_files == "OBJECT":
if value:
if isinstance(value, list):
value = value[0]
return _hda_to_object(value)
else:
return None
else:
raise NotImplementedError()
elif input_type == "data_collection":
if handle_files == "skip":
return SKIP_INPUT
raise NotImplementedError()
elif input_type in ["select", "text", "color", "hidden"]:
json_value = _cast_if_not_none(value, str)
elif input_type == "float":
json_value = _cast_if_not_none(value, float, empty_to_none=True)
elif input_type == "integer":
json_value = _cast_if_not_none(value, int, empty_to_none=True)
elif input_type == "boolean":
json_value = _cast_if_not_none(value, bool)
elif input_type == "data_column":
# value is a SelectToolParameterWrapper()
if input.multiple:
json_value = [int(_) for _ in _cast_if_not_none(value.value, list)]
else:
json_value = [_cast_if_not_none(value.value, int)]
else:
raise NotImplementedError("input_type [%s] not implemented" % input_type)
return json_value
|
49,878 |
def boland(ghi, zenith, datetime_or_doy, min_cos_zenith=0.065, max_zenith=87):
r"""
Estimate DNI and DHI from GHI using the Boland clearness index model.
The Boland model [1]_, [2]_ estimates the diffuse fraction, DF, from global
horizontal irradiance, GHI, through an empirical relationship between DF
and the ratio of GHI to extraterrestrial irradiance or clearness index, kt.
.. math::
\mathit{DF} = \frac{1}{1 + \exp\left(-5 + 8.6 k_t\right)}
where :math:`k_t` is the clearness index.
Parameters
----------
ghi: numeric
Global horizontal irradiance in W/m^2.
zenith: numeric
True (not refraction-corrected) zenith angles in decimal degrees.
datetime_or_doy : int, float, array, pd.DatetimeIndex
Day of year or array of days of year e.g.
pd.DatetimeIndex.dayofyear, or pd.DatetimeIndex.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
data : OrderedDict or DataFrame
Contains the following keys/columns:
* ``dni``: the modeled direct normal irradiance in W/m^2.
* ``dhi``: the modeled diffuse horizontal irradiance in
W/m^2.
* ``kt``: Ratio of global to extraterrestrial irradiance
on a horizontal plane.
References
----------
.. [1] John Boland, Lynne Scott, and Mark Luther, Modelling the diffuse
fraction of global solar radiation on a horizontal surface,
Environmetrics 12(2), pp 103-116, 2001,
:doi:`10.1002/1099-095X(200103)12:2%3C103::AID-ENV447%3E3.0.CO;2-2`
.. [2] J. Boland, B. Ridley (2008) Models of Diffuse Solar Fraction. In:
Badescu V. (eds) Modeling Solar Radiation at the Earth’s Surface.
Springer, Berlin, Heidelberg. :doi:`10.1007/978-3-540-77455-6_8`
See also
--------
dirint
disc
erbs
"""
dni_extra = get_extra_radiation(datetime_or_doy)
kt = clearness_index(ghi, zenith, dni_extra, min_cos_zenith=min_cos_zenith,
max_clearness_index=1)
# Boland equation
df = 1.0 / (1.0 + np.exp(-5.0 + 8.6 * kt))
# NOTE: [1] has different coefficients, for different time intervals
# 15-min: df = 1 / (1 + exp(8.645 * (kt - 0.613)))
# 1-hour: df = 1 / (1 + exp(7.997 * (kt - 0.586)))
dhi = df * ghi
dni = (ghi - dhi) / tools.cosd(zenith)
bad_values = (zenith > max_zenith) | (ghi < 0) | (dni < 0)
dni = np.where(bad_values, 0, dni)
# ensure that closure relationship remains valid
dhi = np.where(bad_values, ghi, dhi)
data = OrderedDict()
data['dni'] = dni
data['dhi'] = dhi
data['kt'] = kt
if isinstance(datetime_or_doy, pd.DatetimeIndex):
data = pd.DataFrame(data, index=datetime_or_doy)
return data
|
def boland(ghi, zenith, datetime_or_doy, min_cos_zenith=0.065, max_zenith=87):
r"""
Estimate DNI and DHI from GHI using the Boland clearness index model.
The Boland model [1]_, [2]_ estimates the diffuse fraction, DF, from global
horizontal irradiance, GHI, through an empirical relationship between DF
and the clearness index, :math:`k_t`, the ratio of GHI to extraterrestrial irradiance.
.. math::
\mathit{DF} = \frac{1}{1 + \exp\left(-5 + 8.6 k_t\right)}
where :math:`k_t` is the clearness index.
Parameters
----------
ghi: numeric
Global horizontal irradiance in W/m^2.
zenith: numeric
True (not refraction-corrected) zenith angles in decimal degrees.
datetime_or_doy : int, float, array, pd.DatetimeIndex
Day of year or array of days of year e.g.
pd.DatetimeIndex.dayofyear, or pd.DatetimeIndex.
min_cos_zenith : numeric, default 0.065
Minimum value of cos(zenith) to allow when calculating global
clearness index `kt`. Equivalent to zenith = 86.273 degrees.
max_zenith : numeric, default 87
Maximum value of zenith to allow in DNI calculation. DNI will be
set to 0 for times with zenith values greater than `max_zenith`.
Returns
-------
data : OrderedDict or DataFrame
Contains the following keys/columns:
* ``dni``: the modeled direct normal irradiance in W/m^2.
* ``dhi``: the modeled diffuse horizontal irradiance in
W/m^2.
* ``kt``: Ratio of global to extraterrestrial irradiance
on a horizontal plane.
References
----------
.. [1] John Boland, Lynne Scott, and Mark Luther, Modelling the diffuse
fraction of global solar radiation on a horizontal surface,
Environmetrics 12(2), pp 103-116, 2001,
:doi:`10.1002/1099-095X(200103)12:2%3C103::AID-ENV447%3E3.0.CO;2-2`
.. [2] J. Boland, B. Ridley (2008) Models of Diffuse Solar Fraction. In:
Badescu V. (eds) Modeling Solar Radiation at the Earth’s Surface.
Springer, Berlin, Heidelberg. :doi:`10.1007/978-3-540-77455-6_8`
See also
--------
dirint
disc
erbs
"""
dni_extra = get_extra_radiation(datetime_or_doy)
kt = clearness_index(ghi, zenith, dni_extra, min_cos_zenith=min_cos_zenith,
max_clearness_index=1)
# Boland equation
df = 1.0 / (1.0 + np.exp(-5.0 + 8.6 * kt))
# NOTE: [1] has different coefficients, for different time intervals
# 15-min: df = 1 / (1 + exp(8.645 * (kt - 0.613)))
# 1-hour: df = 1 / (1 + exp(7.997 * (kt - 0.586)))
dhi = df * ghi
dni = (ghi - dhi) / tools.cosd(zenith)
bad_values = (zenith > max_zenith) | (ghi < 0) | (dni < 0)
dni = np.where(bad_values, 0, dni)
# ensure that closure relationship remains valid
dhi = np.where(bad_values, ghi, dhi)
data = OrderedDict()
data['dni'] = dni
data['dhi'] = dhi
data['kt'] = kt
if isinstance(datetime_or_doy, pd.DatetimeIndex):
data = pd.DataFrame(data, index=datetime_or_doy)
return data
|
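As a quick numerical check of the Boland relation used above (illustrative values only; `np.cos(np.radians(...))` stands in for pvlib's `tools.cosd`):

```python
import numpy as np

kt = 0.5                                     # example clearness index
df = 1.0 / (1.0 + np.exp(-5.0 + 8.6 * kt))   # same equation as in boland()
ghi = 400.0                                  # W/m^2, made-up value
zenith = 60.0                                # degrees

dhi = df * ghi
dni = (ghi - dhi) / np.cos(np.radians(zenith))
print(round(df, 3), round(dhi, 1), round(dni, 1))   # 0.668 267.3 265.4
```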
37,919 |
def load_libgmt(lib_fullnames=None):
"""
Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`.
Will look for the GMT shared library in the directories determined by
clib_full_names().
Parameters
----------
lib_fullnames : list of str or None
List of possible full names of GMT's shared library. If ``None``, will
default to ``clib_full_names()``.
Returns
-------
:py:class:`ctypes.CDLL` object
The loaded shared library.
Raises
------
GMTCLibNotFoundError
If there was any problem loading the library (couldn't find it or
couldn't access the functions).
"""
if lib_fullnames is None:
lib_fullnames = clib_full_names()
error = True
error_msg = []
failing_libs = []
for libname in lib_fullnames:
try:
if libname in failing_libs: # libname is known to fail, so skip it
continue
libgmt = ctypes.CDLL(libname)
check_libgmt(libgmt)
error = False
break
except (OSError, GMTCLibError) as err:
error_msg.append(
f"Error loading the GMT shared library '{libname}'.\n{err}"
)
failing_libs.append(libname)
if error:
raise GMTCLibNotFoundError("\n".join(error_msg))
return libgmt
|
def load_libgmt(lib_fullnames=None):
"""
Find and load ``libgmt`` as a :py:class:`ctypes.CDLL`.
Will look for the GMT shared library in the directories determined by
clib_full_names().
Parameters
----------
lib_fullnames : list of str or None
List of possible full names of GMT's shared library. If ``None``, will
default to ``clib_full_names()``.
Returns
-------
:py:class:`ctypes.CDLL` object
The loaded shared library.
Raises
------
GMTCLibNotFoundError
If there was any problem loading the library (couldn't find it or
couldn't access the functions).
"""
if lib_fullnames is None:
lib_fullnames = clib_full_names()
error = True
error_msg = []
failing_libs = []
for libname in lib_fullnames:
try:
if libname in failing_libs: # libname is known to fail, so skip it
continue
libgmt = ctypes.CDLL(libname)
check_libgmt(libgmt)
error = False
break
except (OSError, GMTCLibError) as err:
error_msg.append(
f"Error loading the GMT shared library '{libname}'.\n{err}"
)
failing_libs.append(libname)
if error_msg:
raise GMTCLibNotFoundError("\n".join(error_msg))
return libgmt
|
17,156 |
def _check_control_messages(vehicle: MyBMWVehicle) -> dict[str, Any]:
extra_attributes: dict[str, Any] = {}
for message in vehicle.check_control_messages.messages:
if (
message.description_short not in ALLOWED_CHECK_CONTROL_MESSAGE_KEYS
and message.description_short not in LOGGED_CHECK_CONTROL_MESSAGE_WARINGS
):
_LOGGER.warning(
"'%s' not an allowed check control message (%s)",
message.description_short,
message,
)
LOGGED_CHECK_CONTROL_MESSAGE_WARINGS.add(message.description_short)
continue
extra_attributes[message.description_short.lower()] = message.state.value
return extra_attributes
|
def _check_control_messages(vehicle: MyBMWVehicle) -> dict[str, Any]:
extra_attributes: dict[str, Any] = {}
for message in vehicle.check_control_messages.messages:
if (
message.description_short not in ALLOWED_CHECK_CONTROL_MESSAGE_KEYS
and message.description_short not in LOGGED_CHECK_CONTROL_MESSAGE_WARNINGS
):
_LOGGER.warning(
"'%s' not an allowed check control message (%s)",
message.description_short,
message,
)
LOGGED_CHECK_CONTROL_MESSAGE_WARINGS.add(message.description_short)
continue
extra_attributes[message.description_short.lower()] = message.state.value
return extra_attributes
|
25,766 |
def run_and_read_cplex(n, problem_fn, solution_fn, solver_logfile,
solver_options, warmstart=None, store_basis=True):
"""
Solving function. Reads the linear problem file and passes it to the cplex
solver. If the solution is successful it returns variable solutions and
constraint dual values. Cplex must be installed for using this function
"""
import cplex
m = cplex.Cplex()
out = m.set_log_stream(solver_logfile)
if solver_options is not None:
for key, value in solver_options.items():
getattr(m.parameters, key).set(value)
m.read(problem_fn)
if warmstart:
m.start.read_basis(warmstart)
m.solve()
is_lp = m.problem_type[m.get_problem_type()] == 'LP'
termination_condition = m.solution.get_status_string()
if 'optimal' in termination_condition:
status = 'ok'
termination_condition = 'optimal'
else:
status = 'warning'
if (status == 'ok') and store_basis and is_lp:
n.basis_fn = solution_fn.replace('.sol', '.bas')
m.solution.basis.write(n.basis_fn)
objective = m.solution.get_objective_value()
sol = pd.Series(m.solution.get_values(), m.variables.get_names())
if is_lp:
dual = pd.Series(m.solution.get_dual_values(),
m.linear_constraints.get_names())
else:
logger.warning("Shadow prices of MILP couldn't be parsed")
dual = pd.Series(index=m.linear_constraints.get_names())
return (status, termination_condition, sol, dual, objective)
|
def run_and_read_cplex(n, problem_fn, solution_fn, solver_logfile,
solver_options, warmstart=None, store_basis=True):
"""
Solving function. Reads the linear problem file and passes it to the cplex
solver. If the solution is sucessful it returns variable solutions and
constraint dual values. Cplex must be installed for using this function
"""
import cplex
m = cplex.Cplex()
out = m.set_log_stream(solver_logfile)
if solver_options is not None:
for key, value in solver_options.items():
getattr(m.parameters, key).set(value)
m.read(problem_fn)
if warmstart:
m.start.read_basis(warmstart)
m.solve()
is_lp = m.problem_type[m.get_problem_type()] == 'LP'
termination_condition = m.solution.get_status_string()
if 'optimal' in termination_condition:
status = 'ok'
termination_condition = 'optimal'
else:
status = 'warning'
if (status == 'ok') and store_basis and is_lp:
n.basis_fn = solution_fn.replace('.sol', '.bas')
m.solution.basis.write(n.basis_fn)
objective = m.solution.get_objective_value()
sol = pd.Series(m.solution.get_values(), m.variables.get_names()).pipe(set_int_index)
if is_lp:
dual = pd.Series(m.solution.get_dual_values(),
m.linear_constraints.get_names())
else:
logger.warning("Shadow prices of MILP couldn't be parsed")
dual = pd.Series(index=m.linear_constraints.get_names())
return (status, termination_condition, sol, dual, objective)
|
5,749 |
def linregress(x, y=None):
r"""
Linear regression calculation
Note that the non-masked version is used, and that this docstring is
replaced by the non-masked docstring + some info on missing data.
"""
if y is None:
x = ma.array(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
raise ValueError("If only `x` is given as input, "
"it has to be of shape (2, N) or (N, 2), "
f"provided shape was {x.shape}")
else:
x = ma.array(x)
y = ma.array(y)
x = x.flatten()
y = y.flatten()
if np.amax(x) == np.amin(x) and len(x) > 1:
raise ValueError("Cannot calculate a linear regression "
"if all x values are identical")
m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False)
if m is not nomask:
x = ma.array(x, mask=m)
y = ma.array(y, mask=m)
if np.any(~m):
result = stats_linregress(x.data[~m], y.data[~m])
else:
# All data is masked
result = stats_LinregressResult(slope=None, intercept=None,
rvalue=None, pvalue=None,
stderr=None,
intercept_stderr=None)
else:
result = stats_linregress(x.data, y.data)
return result
|
def linregress(x, y=None):
r"""
Linear regression calculation
Note that the non-masked version is used, and that this docstring is
replaced by the non-masked docstring + some info on missing data.
"""
if y is None:
x = ma.array(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
raise ValueError("If only `x` is given as input, "
"it has to be of shape (2, N) or (N, 2), "
f"provided shape was {x.shape}")
else:
x = ma.array(x)
y = ma.array(y)
x = x.flatten()
y = y.flatten()
if np.amax(x) == np.amin(x):
raise ValueError("Cannot calculate a linear regression "
"if all x values are identical")
m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False)
if m is not nomask:
x = ma.array(x, mask=m)
y = ma.array(y, mask=m)
if np.any(~m):
result = stats_linregress(x.data[~m], y.data[~m])
else:
# All data is masked
result = stats_LinregressResult(slope=None, intercept=None,
rvalue=None, pvalue=None,
stderr=None,
intercept_stderr=None)
else:
result = stats_linregress(x.data, y.data)
return result
|
8,173 |
def print_map(generic_map):
out = generic_map.__repr__()
assert isinstance(out, str)
|
def test_print_map(generic_map):
out = generic_map.__repr__()
assert isinstance(out, str)
|
51,279 |
def remove_configured_db_url_if_not_needed(
hass: HomeAssistant, entry: ConfigEntry
) -> None:
"""Remove db url from config if it matches recorder database."""
if entry.options[CONF_DB_URL] == get_instance(hass).db_url:
new_options = {**entry.options, **{CONF_DB_URL: None}}
hass.config_entries.async_update_entry(
entry,
options=new_options,
)
|
def remove_configured_db_url_if_not_needed(
hass: HomeAssistant, entry: ConfigEntry
) -> None:
"""Remove db url from config if it matches recorder database."""
new_options = {**entry.options, **{CONF_DB_URL: None}}
hass.config_entries.async_update_entry(
entry,
options=new_options,
)
|
7,155 |
def _handle_input(image, selem, out, mask, out_dtype=None, pixel_size=1):
"""Preprocess and verify input for filters.rank methods.
Parameters
----------
image : 2-D array (integer, float or boolean)
Input image.
selem : 2-D array (integer, float or boolean)
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (integer, float or boolean)
If None, a new array is allocated.
mask : ndarray (integer, float or boolean)
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
out_dtype : data-type
Desired output data-type. Default is None, which means we cast output
in input dtype.
pixel_size : int
Dimension of each pixel. Default value is 1.
Returns
-------
image : 2-D array (np.uint8 or np.uint16)
selem : 2-D array (np.uint8)
The neighborhood expressed as a binary 2-D array.
out : 3-D array (same dtype out_dtype or as input)
Output array. The two first dimensions are the spatial ones, the third
one is the pixel vector (length 1 by default).
mask : 2-D array (np.uint8)
Mask array that defines (>0) area of the image included in the local
neighborhood.
n_bins : int
Number of histogram bins.
out_dtype : data-type
Output data-type.
"""
assert_nD(image, 2)
if image.dtype not in (np.uint8, np.uint16, np.bool_):
message = ('Possible precision loss converting image of type {} to '
'uint8 as required by rank filters. Convert manually using '
'skimage.util.img_as_ubyte to silence this warning.'
.format(image.dtype))
warn(message, stacklevel=2)
image = img_as_ubyte(image)
if out_dtype is None:
out_dtype = image.dtype
selem = np.ascontiguousarray(img_as_ubyte(selem > 0))
image = np.ascontiguousarray(image)
if mask is None:
mask = np.ones(image.shape, dtype=np.uint8)
else:
mask = img_as_ubyte(mask)
mask = np.ascontiguousarray(mask)
if image is out:
raise NotImplementedError("Cannot perform rank operation in place.")
if out is None:
out = np.empty(image.shape + (pixel_size,), dtype=out_dtype)
else:
if len(out.shape) == 2:
out = out.reshape(out.shape+(pixel_size,))
if image.dtype in (np.uint8, np.int8):
n_bins = 256
else:
# Convert to a Python int to avoid the potential overflow when we add
# 1 to the maximum of the image.
n_bins = int(max(3, image.max())) + 1
if n_bins > 2**10:
warn("Bad rank filter performance is expected due to a "
"large number of bins ({}), equivalent to an approximate "
"bitdepth of {:.1f}.".format(n_bins, np.log2(n_bins)),
stacklevel=2)
return image, selem, out, mask, n_bins, out_dtype
|
def _handle_input(image, selem, out, mask, out_dtype=None, pixel_size=1):
"""Preprocess and verify input for filters.rank methods.
Parameters
----------
image : 2-D array (integer, float or boolean)
Input image.
selem : 2-D array (integer, float or boolean)
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (integer, float or boolean)
If None, a new array is allocated.
mask : ndarray (integer, float or boolean)
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
out_dtype : data-type
Desired output data-type. Default is None, which means we cast output
in input dtype.
pixel_size : int, optional
Dimension of each pixel. Default value is 1.
Returns
-------
image : 2-D array (np.uint8 or np.uint16)
selem : 2-D array (np.uint8)
The neighborhood expressed as a binary 2-D array.
out : 3-D array (same dtype out_dtype or as input)
Output array. The two first dimensions are the spatial ones, the third
one is the pixel vector (length 1 by default).
mask : 2-D array (np.uint8)
Mask array that defines (>0) area of the image included in the local
neighborhood.
n_bins : int
Number of histogram bins.
out_dtype : data-type
Output data-type.
"""
assert_nD(image, 2)
if image.dtype not in (np.uint8, np.uint16, np.bool_):
message = ('Possible precision loss converting image of type {} to '
'uint8 as required by rank filters. Convert manually using '
'skimage.util.img_as_ubyte to silence this warning.'
.format(image.dtype))
warn(message, stacklevel=2)
image = img_as_ubyte(image)
if out_dtype is None:
out_dtype = image.dtype
selem = np.ascontiguousarray(img_as_ubyte(selem > 0))
image = np.ascontiguousarray(image)
if mask is None:
mask = np.ones(image.shape, dtype=np.uint8)
else:
mask = img_as_ubyte(mask)
mask = np.ascontiguousarray(mask)
if image is out:
raise NotImplementedError("Cannot perform rank operation in place.")
if out is None:
out = np.empty(image.shape + (pixel_size,), dtype=out_dtype)
else:
if len(out.shape) == 2:
out = out.reshape(out.shape+(pixel_size,))
if image.dtype in (np.uint8, np.int8):
n_bins = 256
else:
# Convert to a Python int to avoid the potential overflow when we add
# 1 to the maximum of the image.
n_bins = int(max(3, image.max())) + 1
if n_bins > 2**10:
warn("Bad rank filter performance is expected due to a "
"large number of bins ({}), equivalent to an approximate "
"bitdepth of {:.1f}.".format(n_bins, np.log2(n_bins)),
stacklevel=2)
return image, selem, out, mask, n_bins, out_dtype
|
30,390 |
def decrypt_email_body(client: Client, args: Dict):
""" Decrypt the message
Args:
client: Client
args: Dict
"""
encrypt_message = demisto.getFilePath(args.get('encrypt_message'))
# Load private key and cert.
client.smime.load_key(client.private_key_file, client.public_key_file)
# Load the encrypted data.
p7, data = SMIME.smime_load_pkcs7(encrypt_message['path'])
# Decrypt p7.
out = client.smime.decrypt(p7).decode('utf-8')
entry_context = {
'SMIME': {
'Message': out,
}
}
human_readable = f'The encrypted message is: \n{out}'
return human_readable, entry_context
|
def decrypt_email_body(client: Client, args: Dict):
""" Decrypt the message
Args:
client: Client
args: Dict
"""
encrypt_message = demisto.getFilePath(args.get('encrypt_message'))
# Load private key and cert.
client.smime.load_key(client.private_key_file, client.public_key_file)
# Load the encrypted data.
p7, data = SMIME.smime_load_pkcs7(encrypt_message['path'])
# Decrypt p7.
out = client.smime.decrypt(p7).decode('utf-8')
entry_context = {
'SMIME': {
'Message': out,
}
}
human_readable = f'The decrypted message is: \n{out}'
return human_readable, entry_context
|
28,608 |
def plot_khat(
khats,
color="C0",
xlabels=False,
show_hlines=False,
show_bins=False,
bin_format="{1:.1f}%",
annotate=False,
threshold=None,
hover_label=False,
hover_format="{1}",
figsize=None,
textsize=None,
coords=None,
legend=False,
markersize=None,
ax=None,
hlines_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
**kwargs
):
r"""
Plot Pareto tail indices for diagnosing convergence.
Parameters
----------
khats : ELPDData containing Pareto shapes information or array of
Pareto tail indices.
color : str or array_like, optional
Colors of the scatter plot, if color is a str all dots will
have the same color, if it is the size of the observations,
each dot will have the specified color, otherwise, it will be
interpreted as a list of the dims to be used for the color
code. If Matplotlib c argument is passed, it will override
the color argument
xlabels : bool, optional
Use coords as xticklabels
show_hlines : bool, optional
Show the horizontal lines, by default at the values [0, 0.5, 0.7, 1].
show_bins : bool, optional
Show the percentage of khats falling in each bin, as delimited by hlines.
bin_format : str, optional
The string is used as formatting guide calling ``bin_format.format(count, pct)``.
threshold : float, optional
Show the labels of k values larger than threshold. Defaults to `None`,
no observations will be highlighted.
hover_label : bool, optional
Show the datapoint label when hovering over it with the mouse. Requires an interactive
backend.
hover_format : str, optional
String used to format the hover label via ``hover_format.format(idx, coord_label)``
figsize : tuple, optional
Figure size. If None it will be defined automatically.
textsize: float, optional
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
coords : mapping, optional
Coordinates of points to plot. **All** values are used for computation, but only a
        subset can be plotted for convenience.
legend : bool, optional
Include a legend to the plot. Only taken into account when color argument is a dim name.
markersize: int, optional
markersize for scatter plot. Defaults to `None` in which case it will
be chosen based on autoscaling for figsize.
ax: axes, optional
Matplotlib axes or bokeh figures.
hlines_kwargs: dictionary, optional
Additional keywords passed to
:func:`matplotlib.axes.Axes.hlines`.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`.
show : bool, optional
Call backend show function.
kwargs :
Additional keywords passed to
:func:`matplotlib.axes.Axes.scatter`.
Returns
-------
axes : matplotlib axes or bokeh figures
See Also
--------
psislw : Pareto smoothed importance sampling (PSIS).
Examples
--------
Plot estimated pareto shape parameters showing how many fall in each category.
.. plot::
:context: close-figs
>>> import arviz as az
>>> radon = az.load_arviz_data("radon")
>>> loo_radon = az.loo(radon, pointwise=True)
>>> az.plot_khat(loo_radon, show_bins=True)
Show xlabels
.. plot::
:context: close-figs
>>> centered_eight = az.load_arviz_data("centered_eight")
>>> khats = az.loo(centered_eight, pointwise=True).pareto_k
>>> az.plot_khat(khats, xlabels=True, threshold=1)
Use custom color scheme
.. plot::
:context: close-figs
>>> counties = radon.posterior.County[radon.constant_data.county_idx].values
>>> colors = [
... "blue" if county[-1] in ("A", "N") else "green" for county in counties
... ]
>>> az.plot_khat(loo_radon, color=colors)
Notes
-----
The Generalized Pareto distribution (GPD) may be used to diagnose
convergence rates for importance sampling. GPD has parameters
offset, scale, and shape. The shape parameter is usually denoted
with ``k``. ``k`` also tells how many finite moments the
distribution has. The pre-asymptotic convergence rate of
importance sampling can be estimated based on the fractional
number of finite moments of the importance ratio distribution. GPD
is fitted to the largest importance ratios and the estimated shape
    parameter ``k``, i.e., ``\hat{k}`` can then be used as a diagnostic
(most importantly if ``\hat{k} > 0.7``, then the convergence rate
is impractically low). See [1]_.
References
----------
.. [1] Vehtari, A., Simpson, D., Gelman, A., Yao, Y., Gabry, J.,
2019. Pareto Smoothed Importance Sampling. arXiv:1507.02646
[stat].
"""
if annotate:
_log.warning("annotate will be deprecated, please use threshold instead")
threshold = annotate
if coords is None:
coords = {}
if color is None:
color = "C0"
if isinstance(khats, np.ndarray):
khats = khats.flatten()
xlabels = False
legend = False
dims = []
else:
if isinstance(khats, ELPDData):
khats = khats.pareto_k
if not isinstance(khats, DataArray):
raise ValueError("Incorrect khat data input. Check the documentation")
khats = get_coords(khats, coords)
dims = khats.dims
n_data_points = khats.size
xdata = np.arange(n_data_points)
if isinstance(khats, DataArray):
coord_labels = format_coords_as_labels(khats)
else:
coord_labels = xdata.astype(str)
plot_khat_kwargs = dict(
hover_label=hover_label,
hover_format=hover_format,
ax=ax,
figsize=figsize,
xdata=xdata,
khats=khats,
kwargs=kwargs,
threshold=threshold,
coord_labels=coord_labels,
show_hlines=show_hlines,
show_bins=show_bins,
hlines_kwargs=hlines_kwargs,
xlabels=xlabels,
legend=legend,
color=color,
dims=dims,
textsize=textsize,
markersize=markersize,
n_data_points=n_data_points,
bin_format=bin_format,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_khat", "khatplot", backend)
axes = plot(**plot_khat_kwargs)
return axes
|
def plot_khat(
khats,
color="C0",
xlabels=False,
show_hlines=False,
show_bins=False,
bin_format="{1:.1f}%",
annotate=False,
threshold=None,
hover_label=False,
hover_format="{1}",
figsize=None,
textsize=None,
coords=None,
legend=False,
markersize=None,
ax=None,
hlines_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
**kwargs
):
r"""
Plot Pareto tail indices for diagnosing convergence.
Parameters
----------
khats : ELPDData containing Pareto shapes information or array of
Pareto tail indices.
color : str or array_like, optional
Colors of the scatter plot, if color is a str all dots will
have the same color, if it is the size of the observations,
each dot will have the specified color, otherwise, it will be
interpreted as a list of the dims to be used for the color
code. If Matplotlib c argument is passed, it will override
the color argument
xlabels : bool, optional
Use coords as xticklabels
show_hlines : bool, optional
Show the horizontal lines, by default at the values [0, 0.5, 0.7, 1].
show_bins : bool, optional
Show the percentage of khats falling in each bin, as delimited by hlines.
bin_format : str, optional
The string is used as formatting guide calling ``bin_format.format(count, pct)``.
threshold : float, optional
Show the labels of k values larger than threshold. Defaults to `None`,
no observations will be highlighted.
hover_label : bool, optional
Show the datapoint label when hovering over it with the mouse. Requires an interactive
backend.
hover_format : str, optional
String used to format the hover label via ``hover_format.format(idx, coord_label)``
figsize : tuple, optional
Figure size. If None it will be defined automatically.
textsize: float, optional
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
coords : mapping, optional
Coordinates of points to plot. **All** values are used for computation, but only a
        subset can be plotted for convenience.
legend : bool, optional
Include a legend to the plot. Only taken into account when color argument is a dim name.
markersize: int, optional
markersize for scatter plot. Defaults to `None` in which case it will
be chosen based on autoscaling for figsize.
ax: axes, optional
Matplotlib axes or bokeh figures.
hlines_kwargs: dictionary, optional
Additional keywords passed to
:func:`matplotlib.axes.Axes.hlines`.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`.
show : bool, optional
Call backend show function.
kwargs :
Additional keywords passed to
:meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
axes : matplotlib axes or bokeh figures
See Also
--------
psislw : Pareto smoothed importance sampling (PSIS).
Examples
--------
Plot estimated pareto shape parameters showing how many fall in each category.
.. plot::
:context: close-figs
>>> import arviz as az
>>> radon = az.load_arviz_data("radon")
>>> loo_radon = az.loo(radon, pointwise=True)
>>> az.plot_khat(loo_radon, show_bins=True)
Show xlabels
.. plot::
:context: close-figs
>>> centered_eight = az.load_arviz_data("centered_eight")
>>> khats = az.loo(centered_eight, pointwise=True).pareto_k
>>> az.plot_khat(khats, xlabels=True, threshold=1)
Use custom color scheme
.. plot::
:context: close-figs
>>> counties = radon.posterior.County[radon.constant_data.county_idx].values
>>> colors = [
... "blue" if county[-1] in ("A", "N") else "green" for county in counties
... ]
>>> az.plot_khat(loo_radon, color=colors)
Notes
-----
The Generalized Pareto distribution (GPD) may be used to diagnose
convergence rates for importance sampling. GPD has parameters
offset, scale, and shape. The shape parameter is usually denoted
with ``k``. ``k`` also tells how many finite moments the
distribution has. The pre-asymptotic convergence rate of
importance sampling can be estimated based on the fractional
number of finite moments of the importance ratio distribution. GPD
is fitted to the largest importance ratios and the estimated shape
    parameter ``k``, i.e., ``\hat{k}`` can then be used as a diagnostic
(most importantly if ``\hat{k} > 0.7``, then the convergence rate
is impractically low). See [1]_.
References
----------
.. [1] Vehtari, A., Simpson, D., Gelman, A., Yao, Y., Gabry, J.,
2019. Pareto Smoothed Importance Sampling. arXiv:1507.02646
[stat].
"""
if annotate:
_log.warning("annotate will be deprecated, please use threshold instead")
threshold = annotate
if coords is None:
coords = {}
if color is None:
color = "C0"
if isinstance(khats, np.ndarray):
khats = khats.flatten()
xlabels = False
legend = False
dims = []
else:
if isinstance(khats, ELPDData):
khats = khats.pareto_k
if not isinstance(khats, DataArray):
raise ValueError("Incorrect khat data input. Check the documentation")
khats = get_coords(khats, coords)
dims = khats.dims
n_data_points = khats.size
xdata = np.arange(n_data_points)
if isinstance(khats, DataArray):
coord_labels = format_coords_as_labels(khats)
else:
coord_labels = xdata.astype(str)
plot_khat_kwargs = dict(
hover_label=hover_label,
hover_format=hover_format,
ax=ax,
figsize=figsize,
xdata=xdata,
khats=khats,
kwargs=kwargs,
threshold=threshold,
coord_labels=coord_labels,
show_hlines=show_hlines,
show_bins=show_bins,
hlines_kwargs=hlines_kwargs,
xlabels=xlabels,
legend=legend,
color=color,
dims=dims,
textsize=textsize,
markersize=markersize,
n_data_points=n_data_points,
bin_format=bin_format,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_khat", "khatplot", backend)
axes = plot(**plot_khat_kwargs)
return axes
|
55,228 |
def sync(loop, func, *args, timeout=None, **kwargs):
"""
Make loop run coroutine until it returns. Runs in other thread
Example usage:
ffspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, timeout, **kwargs)
"""
timeout = timeout if timeout else None # convert 0 or 0.0 to None
# NB: if the loop is not running *yet*, it is OK to submit work
# and we will wait for it
if loop is None or loop.is_closed():
raise RuntimeError("Loop is not running")
try:
loop0 = asyncio.events.get_running_loop()
if loop0 is loop:
raise NotImplementedError("Calling sync() from within a running loop")
except RuntimeError:
pass
coro = func(*args, **kwargs)
result = [None]
event = threading.Event()
asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
while True:
        # this loop allows the thread to get interrupted
if event.wait(1):
break
if timeout is not None:
timeout -= 1
if timeout < 0:
raise FSTimeoutError
return_result = result[0]
if isinstance(return_result, asyncio.TimeoutError):
# suppress asyncio.TimeoutError, raise FSTimeoutError
raise FSTimeoutError from return_result
elif isinstance(return_result, BaseException):
raise return_result
else:
return return_result
|
def sync(loop, func, *args, timeout=None, **kwargs):
"""
Make loop run coroutine until it returns. Runs in other thread
Example usage:
fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, timeout, **kwargs)
"""
timeout = timeout if timeout else None # convert 0 or 0.0 to None
# NB: if the loop is not running *yet*, it is OK to submit work
# and we will wait for it
if loop is None or loop.is_closed():
raise RuntimeError("Loop is not running")
try:
loop0 = asyncio.events.get_running_loop()
if loop0 is loop:
raise NotImplementedError("Calling sync() from within a running loop")
except RuntimeError:
pass
coro = func(*args, **kwargs)
result = [None]
event = threading.Event()
asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
while True:
        # this loop allows the thread to get interrupted
if event.wait(1):
break
if timeout is not None:
timeout -= 1
if timeout < 0:
raise FSTimeoutError
return_result = result[0]
if isinstance(return_result, asyncio.TimeoutError):
# suppress asyncio.TimeoutError, raise FSTimeoutError
raise FSTimeoutError from return_result
elif isinstance(return_result, BaseException):
raise return_result
else:
return return_result
|
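The `_runner` coroutine used by `sync` above is not shown in this pair. The sketch below is a minimal, hypothetical implementation consistent with how `sync` consumes it (store the coroutine's return value or raised exception in `result[0]`, then set `event`); the real fsspec helper may differ:

import asyncio

async def _runner(event, coro, result, timeout=None):
    # Hypothetical helper matching sync()'s expectations: run the coroutine,
    # stash its return value (or the raised exception) in result[0], and
    # signal the waiting thread via event when finished.
    if timeout is not None:
        coro = asyncio.wait_for(coro, timeout=timeout)
    try:
        result[0] = await coro
    except Exception as exc:
        # sync() re-raises whatever is stored here, translating
        # asyncio.TimeoutError into FSTimeoutError.
        result[0] = exc
    finally:
        event.set()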
14,560 |
def match_data(g_pool, pupil_list, ref_list):
"""
Returns binocular and monocular matched pupil datums and ref points.
Uses a dispersion criterion to dismiss matches which are too far apart.
"""
if not (pupil_list or ref_list):
not_enough_data_error_msg = (
"Not enough ref point or pupil data available for calibration."
)
logger.error(not_enough_data_error_msg)
return {
"subject": "calibration.failed",
"reason": not_enough_data_error_msg,
"timestamp": g_pool.get_timestamp(),
"record": True,
}
pupil0 = [p for p in pupil_list if p["id"] == 0]
pupil1 = [p for p in pupil_list if p["id"] == 1]
matched_binocular_data = closest_matches_binocular(ref_list, pupil_list)
matched_pupil0_data = closest_matches_monocular(ref_list, pupil0)
matched_pupil1_data = closest_matches_monocular(ref_list, pupil1)
if len(matched_pupil0_data) > len(matched_pupil1_data):
matched_monocular_data = matched_pupil0_data
else:
matched_monocular_data = matched_pupil1_data
logger.info(
"Collected {} monocular calibration data.".format(len(matched_monocular_data))
)
logger.info(
"Collected {} binocular calibration data.".format(len(matched_binocular_data))
)
return (
matched_binocular_data,
matched_monocular_data,
matched_pupil0_data,
matched_pupil1_data,
pupil0,
pupil1,
)
|
def match_data(g_pool, pupil_list, ref_list):
"""
Returns binocular and monocular matched pupil datums and ref points.
Uses a dispersion criterion to dismiss matches which are too far apart.
"""
if not (pupil_list or ref_list):
not_enough_data_error_msg = (
"Not enough ref point or pupil data available for calibration."
)
logger.error(not_enough_data_error_msg)
return {
"subject": "calibration.failed",
"reason": not_enough_data_error_msg,
"timestamp": g_pool.get_timestamp(),
"record": True,
}
pupil0 = [p for p in pupil_list if p["id"] == 0]
pupil1 = [p for p in pupil_list if p["id"] == 1]
matched_binocular_data = closest_matches_binocular(ref_list, pupil_list)
matched_pupil0_data = closest_matches_monocular(ref_list, pupil0)
matched_pupil1_data = closest_matches_monocular(ref_list, pupil1)
if len(matched_pupil0_data) > len(matched_pupil1_data):
matched_monocular_data = matched_pupil0_data
else:
matched_monocular_data = matched_pupil1_data
logger.info(
f"Collected {len(matched_monocular_data)} monocular calibration data."
)
logger.info(
"Collected {} binocular calibration data.".format(len(matched_binocular_data))
)
return (
matched_binocular_data,
matched_monocular_data,
matched_pupil0_data,
matched_pupil1_data,
pupil0,
pupil1,
)
|
28,027 |
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
subcommands = parser.add_subparsers(title='available actions')
# Create handlers for individual subcommands.
runs = subcommands.add_parser(
'runs',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="List the analysis runs available on the server.",
help="List the available analysis runs.")
__register_runs(runs)
runs.set_defaults(func=cmd_line_client.handle_list_runs)
__add_common_arguments(runs, output_formats=DEFAULT_OUTPUT_FORMATS)
run_histories = subcommands.add_parser(
'history',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Show run history for some analysis runs.",
help="Show run history of multiple runs.")
__register_run_histories(run_histories)
run_histories.set_defaults(func=cmd_line_client.handle_list_run_histories)
__add_common_arguments(run_histories,
output_formats=DEFAULT_OUTPUT_FORMATS)
results = subcommands.add_parser(
'results',
formatter_class=arg.RawDescriptionDefaultHelpFormatter,
description="Show the individual analysis reports' summary.",
help="List analysis result (finding) summary for a given run.",
epilog='''Example scenario: List analysis results
------------------------------------------------
Get analysis results for a run:
CodeChecker cmd results my_run
Get analysis results for multiple runs:
CodeChecker cmd results my_run1 my_run2
Get analysis results by using regex:
CodeChecker cmd results "my_run*"
Get analysis results for a run and filter the analysis results:
CodeChecker cmd results my_run --severity critical high medium \\
--file "/home/username/my_project/*"
CodeChecker cmd results my_run --review-status confirmed unreviewed \\
--component my_component_name''')
__register_results(results)
results.set_defaults(func=cmd_line_client.handle_list_results)
__add_common_arguments(results, output_formats=DEFAULT_OUTPUT_FORMATS)
diff = subcommands.add_parser(
'diff',
formatter_class=arg.RawDescriptionDefaultHelpFormatter,
description="Compare two analysis runs to show the results that "
"differ between the two.",
help="Compare two analysis runs and show the difference.",
epilog='''
environment variables:
CC_REPO_DIR Root directory of the sources, i.e. the directory where
the repository was cloned. Use it when generating gerrit
output.
CC_REPORT_URL URL where the report can be found. Use it when generating
gerrit output.
CC_CHANGED_FILES Path of changed files json from Gerrit. Use it when
generating gerrit output.
Example scenario: Compare multiple analysis runs
------------------------------------------------
Compare two runs and show results that didn't exist in the 'run1' but appear in
the 'run2' run:
CodeChecker cmd diff -b run1 -n run2 --new
Compare a remote run with a local report directory and show results that didn't
exist in the remote run 'run1' but appear in the local report directory:
CodeChecker cmd diff -b run1 -n /my_report_dir --new
Compare two runs and show results that exist in both runs and filter results
by multiple severity values:
CodeChecker cmd diff -b run1 -n run2 --unresolved --severity high medium'''
)
__register_diff(diff)
diff_output_formats = DEFAULT_OUTPUT_FORMATS + ["html", "gerrit",
"codeclimate"]
output_help_msg = "R|The output format(s) to use in showing the data.\n" \
"- html: multiple html files will be generated in the " \
"export directory.\n" \
"- gerrit: a 'gerrit_review.json' file will be " \
"generated in the export directory.\n" \
"- codeclimate: a 'codeclimate_issues.json' file will " \
"be generated in the export directory.\n" \
"For the output formats (json, gerrit, codeclimate) " \
"if an export directory is set the output files will " \
"be generated if not the results are printed to the " \
"stdout but only if one format was selected."
__add_common_arguments(diff,
output_formats=diff_output_formats,
output_help_message=output_help_msg,
allow_multiple_outputs=True)
sum_p = subcommands.add_parser(
'sum',
formatter_class=arg.RawDescriptionDefaultHelpFormatter,
description="Show checker statistics for some analysis runs.",
help="Show statistics of checkers.",
epilog='''Example scenario: Get checker statistics
------------------------------------------------
Get statistics for a run:
CodeChecker cmd sum -n my_run
Get statistics for all runs filtered by multiple checker names:
CodeChecker cmd sum --all --checker-name "core.*" "deadcode.*"
Get statistics for all runs and only for severity 'high':
CodeChecker cmd sum --all --severity "high"''')
__register_sum(sum_p)
sum_p.set_defaults(func=cmd_line_client.handle_list_result_types)
__add_common_arguments(sum_p, output_formats=DEFAULT_OUTPUT_FORMATS)
token = subcommands.add_parser(
'token',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Access subcommands related to configuring personal "
"access tokens managed by a CodeChecker server. Please "
"see the individual subcommands for details.",
help="Access subcommands related to configuring personal access "
"tokens managed by a CodeChecker server.")
__register_token(token)
del_p = subcommands.add_parser(
'del',
formatter_class=arg.RawDescriptionDefaultHelpFormatter,
description="""
Remove analysis runs from the server based on some criteria.
!!! WARNING !!! When a run is deleted, ALL associated information (reports,
files, run histories) is PERMANENTLY LOST! Please be careful with this command
because it can not be undone.
NOTE! You can't remove a snapshot of run (a run history), you can remove only
full runs.""",
help="Delete analysis runs.")
__register_delete(del_p)
del_p.set_defaults(func=cmd_line_client.handle_remove_run_results)
__add_common_arguments(del_p)
update_p = subcommands.add_parser(
'update',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Update the name of an analysis run.",
help="Update an analysis run.")
__register_update(update_p)
update_p.set_defaults(func=cmd_line_client.handle_update_run)
__add_common_arguments(update_p)
suppress = subcommands.add_parser(
'suppress',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Imports suppressions from a suppress file to a "
"CodeChecker server.",
help="Manage and import suppressions of a CodeChecker server.")
__register_suppress(suppress)
suppress.set_defaults(func=cmd_line_client.handle_suppress)
__add_common_arguments(suppress)
products = subcommands.add_parser(
'products',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="CodeChecker organises its databases into products. "
"Each product has an individually configured database "
"which stores the analysis results. These subcommands "
"are used to manage the products configured by the "
"server. Please see the individual subcommands for "
"details.",
epilog="Most of these commands require authentication and "
"appropriate access rights. Please see 'CodeChecker cmd "
"login' to authenticate.",
help="Access subcommands related to configuring the products managed "
"by a CodeChecker server.")
__register_products(products)
__add_common_arguments(products, needs_product_url=None)
components = subcommands.add_parser(
'components',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Source components are named collection of directories "
"specified as directory filter.",
help="Access subcommands related to configuring the source components "
"managed by a CodeChecker server.")
__register_source_components(components)
__add_common_arguments(components)
login = subcommands.add_parser(
'login',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Certain CodeChecker servers can require elevated "
"privileges to access analysis results. In such cases "
"it is mandatory to authenticate to the server. This "
"action is used to perform an authentication in the "
"command-line.",
help="Authenticate into CodeChecker servers that require privileges.")
__register_login(login)
login.set_defaults(func=cmd_line_client.handle_login)
__add_common_arguments(login, needs_product_url=False)
export = subcommands.add_parser(
'export',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Export the comments and review status from "
"codechecker server into a json format",
help="Export the analysis to a json file "
"for a given run"
)
__register_export(export)
export.set_defaults(func=cmd_line_client.handle_export)
__add_common_arguments(export, output_formats=['json'])
|
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
subcommands = parser.add_subparsers(title='available actions')
# Create handlers for individual subcommands.
runs = subcommands.add_parser(
'runs',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="List the analysis runs available on the server.",
help="List the available analysis runs.")
__register_runs(runs)
runs.set_defaults(func=cmd_line_client.handle_list_runs)
__add_common_arguments(runs, output_formats=DEFAULT_OUTPUT_FORMATS)
run_histories = subcommands.add_parser(
'history',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Show run history for some analysis runs.",
help="Show run history of multiple runs.")
__register_run_histories(run_histories)
run_histories.set_defaults(func=cmd_line_client.handle_list_run_histories)
__add_common_arguments(run_histories,
output_formats=DEFAULT_OUTPUT_FORMATS)
results = subcommands.add_parser(
'results',
formatter_class=arg.RawDescriptionDefaultHelpFormatter,
description="Show the individual analysis reports' summary.",
help="List analysis result (finding) summary for a given run.",
epilog='''Example scenario: List analysis results
------------------------------------------------
Get analysis results for a run:
CodeChecker cmd results my_run
Get analysis results for multiple runs:
CodeChecker cmd results my_run1 my_run2
Get analysis results by using regex:
CodeChecker cmd results "my_run*"
Get analysis results for a run and filter the analysis results:
CodeChecker cmd results my_run --severity critical high medium \\
--file "/home/username/my_project/*"
CodeChecker cmd results my_run --review-status confirmed unreviewed \\
--component my_component_name''')
__register_results(results)
results.set_defaults(func=cmd_line_client.handle_list_results)
__add_common_arguments(results, output_formats=DEFAULT_OUTPUT_FORMATS)
diff = subcommands.add_parser(
'diff',
formatter_class=arg.RawDescriptionDefaultHelpFormatter,
description="Compare two analysis runs to show the results that "
"differ between the two.",
help="Compare two analysis runs and show the difference.",
epilog='''
environment variables:
CC_REPO_DIR Root directory of the sources, i.e. the directory where
the repository was cloned. Use it when generating gerrit
output.
CC_REPORT_URL URL where the report can be found. Use it when generating
gerrit output.
CC_CHANGED_FILES Path of changed files json from Gerrit. Use it when
generating gerrit output.
Example scenario: Compare multiple analysis runs
------------------------------------------------
Compare two runs and show results that didn't exist in the 'run1' but appear in
the 'run2' run:
CodeChecker cmd diff -b run1 -n run2 --new
Compare a remote run with a local report directory and show results that didn't
exist in the remote run 'run1' but appear in the local report directory:
CodeChecker cmd diff -b run1 -n /my_report_dir --new
Compare two runs and show results that exist in both runs and filter results
by multiple severity values:
CodeChecker cmd diff -b run1 -n run2 --unresolved --severity high medium'''
)
__register_diff(diff)
diff_output_formats = DEFAULT_OUTPUT_FORMATS + ["html", "gerrit",
"codeclimate"]
output_help_msg = "R|The output format(s) to use in showing the data.\n" \
"- html: multiple html files will be generated in the " \
"export directory.\n" \
"- gerrit: a 'gerrit_review.json' file will be " \
"generated in the export directory.\n" \
"- codeclimate: a 'codeclimate_issues.json' file will " \
"be generated in the export directory.\n" \
"For the output formats (json, gerrit, codeclimate) " \
"if an export directory is set the output files will " \
"be generated if not the results are printed to the " \
"stdout but only if one format was selected."
__add_common_arguments(diff,
output_formats=diff_output_formats,
output_help_message=output_help_msg,
allow_multiple_outputs=True)
sum_p = subcommands.add_parser(
'sum',
formatter_class=arg.RawDescriptionDefaultHelpFormatter,
description="Show checker statistics for some analysis runs.",
help="Show statistics of checkers.",
epilog='''Example scenario: Get checker statistics
------------------------------------------------
Get statistics for a run:
CodeChecker cmd sum -n my_run
Get statistics for all runs filtered by multiple checker names:
CodeChecker cmd sum --all --checker-name "core.*" "deadcode.*"
Get statistics for all runs and only for severity 'high':
CodeChecker cmd sum --all --severity "high"''')
__register_sum(sum_p)
sum_p.set_defaults(func=cmd_line_client.handle_list_result_types)
__add_common_arguments(sum_p, output_formats=DEFAULT_OUTPUT_FORMATS)
token = subcommands.add_parser(
'token',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Access subcommands related to configuring personal "
"access tokens managed by a CodeChecker server. Please "
"see the individual subcommands for details.",
help="Access subcommands related to configuring personal access "
"tokens managed by a CodeChecker server.")
__register_token(token)
del_p = subcommands.add_parser(
'del',
formatter_class=arg.RawDescriptionDefaultHelpFormatter,
description="""
Remove analysis runs from the server based on some criteria.
!!! WARNING !!! When a run is deleted, ALL associated information (reports,
files, run histories) is PERMANENTLY LOST! Please be careful with this command
because it can not be undone.
NOTE! You can't remove a snapshot of run (a run history), you can remove only
full runs.""",
help="Delete analysis runs.")
__register_delete(del_p)
del_p.set_defaults(func=cmd_line_client.handle_remove_run_results)
__add_common_arguments(del_p)
update_p = subcommands.add_parser(
'update',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Update the name of an analysis run.",
help="Update an analysis run.")
__register_update(update_p)
update_p.set_defaults(func=cmd_line_client.handle_update_run)
__add_common_arguments(update_p)
suppress = subcommands.add_parser(
'suppress',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Imports suppressions from a suppress file to a "
"CodeChecker server.",
help="Manage and import suppressions of a CodeChecker server.")
__register_suppress(suppress)
suppress.set_defaults(func=cmd_line_client.handle_suppress)
__add_common_arguments(suppress)
products = subcommands.add_parser(
'products',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="CodeChecker organises its databases into products. "
"Each product has an individually configured database "
"which stores the analysis results. These subcommands "
"are used to manage the products configured by the "
"server. Please see the individual subcommands for "
"details.",
epilog="Most of these commands require authentication and "
"appropriate access rights. Please see 'CodeChecker cmd "
"login' to authenticate.",
help="Access subcommands related to configuring the products managed "
"by a CodeChecker server.")
__register_products(products)
__add_common_arguments(products, needs_product_url=None)
components = subcommands.add_parser(
'components',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Source components are named collection of directories "
"specified as directory filter.",
help="Access subcommands related to configuring the source components "
"managed by a CodeChecker server.")
__register_source_components(components)
__add_common_arguments(components)
login = subcommands.add_parser(
'login',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Certain CodeChecker servers can require elevated "
"privileges to access analysis results. In such cases "
"it is mandatory to authenticate to the server. This "
"action is used to perform an authentication in the "
"command-line.",
help="Authenticate into CodeChecker servers that require privileges.")
__register_login(login)
login.set_defaults(func=cmd_line_client.handle_login)
__add_common_arguments(login, needs_product_url=False)
export = subcommands.add_parser(
'export',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Export data (comments, review statuses) from a running CodeChecker server into a json format",
help="Export the analysis to a json file "
"for a given run"
)
__register_export(export)
export.set_defaults(func=cmd_line_client.handle_export)
__add_common_arguments(export, output_formats=['json'])
|
16,348 |
def get_provisioning_info(
msg: dict,
) -> str | ProvisioningEntry | QRProvisioningInformation | None:
"""Process provisioning parameters and return the appropriate value."""
if planned_provisioning_entry := msg.get(PLANNED_PROVISIIONING_ENTRY):
return ProvisioningEntry(
dsk=planned_provisioning_entry[DSK],
security_classes=[
SecurityClass(sec_cls)
for sec_cls in planned_provisioning_entry[SECURITY_CLASSES]
],
additional_properties={
k: v
for k, v in planned_provisioning_entry.items()
if k not in (DSK, SECURITY_CLASSES)
},
)
if qr_provisioning_information := msg.get(QR_PROVISIONING_INFORMATION):
protocols = [
Protocols(proto)
for proto in qr_provisioning_information.get(SUPPORTED_PROTOCOLS, [])
]
return QRProvisioningInformation(
version=QRCodeVersion(qr_provisioning_information[VERSION]),
security_classes=[
SecurityClass(sec_cls)
for sec_cls in qr_provisioning_information[SECURITY_CLASSES]
],
dsk=qr_provisioning_information[DSK],
generic_device_class=qr_provisioning_information[GENERIC_DEVICE_CLASS],
specific_device_class=qr_provisioning_information[SPECIFIC_DEVICE_CLASS],
installer_icon_type=qr_provisioning_information[INSTALLER_ICON_TYPE],
manufacturer_id=qr_provisioning_information[MANUFACTURER_ID],
product_type=qr_provisioning_information[PRODUCT_TYPE],
product_id=qr_provisioning_information[PRODUCT_ID],
application_version=qr_provisioning_information[APPLICATION_VERSION],
max_inclusion_request_interval=qr_provisioning_information.get(
MAX_INCLUSION_REQUEST_INTERVAL
),
uuid=qr_provisioning_information.get(UUID),
supported_protocols=protocols if protocols else None,
)
if qr_code_string := msg.get(QR_CODE_STRING):
return qr_code_string
return None
|
def get_provisioning_info(
msg: dict,
) -> str | ProvisioningEntry | QRProvisioningInformation | None:
"""Process provisioning parameters and return the appropriate value."""
if planned_provisioning_entry := msg.get(PLANNED_PROVISIONING_ENTRY):
return ProvisioningEntry(
dsk=planned_provisioning_entry[DSK],
security_classes=[
SecurityClass(sec_cls)
for sec_cls in planned_provisioning_entry[SECURITY_CLASSES]
],
additional_properties={
k: v
for k, v in planned_provisioning_entry.items()
if k not in (DSK, SECURITY_CLASSES)
},
)
if qr_provisioning_information := msg.get(QR_PROVISIONING_INFORMATION):
protocols = [
Protocols(proto)
for proto in qr_provisioning_information.get(SUPPORTED_PROTOCOLS, [])
]
return QRProvisioningInformation(
version=QRCodeVersion(qr_provisioning_information[VERSION]),
security_classes=[
SecurityClass(sec_cls)
for sec_cls in qr_provisioning_information[SECURITY_CLASSES]
],
dsk=qr_provisioning_information[DSK],
generic_device_class=qr_provisioning_information[GENERIC_DEVICE_CLASS],
specific_device_class=qr_provisioning_information[SPECIFIC_DEVICE_CLASS],
installer_icon_type=qr_provisioning_information[INSTALLER_ICON_TYPE],
manufacturer_id=qr_provisioning_information[MANUFACTURER_ID],
product_type=qr_provisioning_information[PRODUCT_TYPE],
product_id=qr_provisioning_information[PRODUCT_ID],
application_version=qr_provisioning_information[APPLICATION_VERSION],
max_inclusion_request_interval=qr_provisioning_information.get(
MAX_INCLUSION_REQUEST_INTERVAL
),
uuid=qr_provisioning_information.get(UUID),
supported_protocols=protocols if protocols else None,
)
if qr_code_string := msg.get(QR_CODE_STRING):
return qr_code_string
return None
|
1,747 |
def top_k_accuracy_score(y_true, y_score, k=5, normalize=True):
"""Top k Accuracy classification score.
This metric computes the number of times where the correct label is among
the top ``k`` labels predicted (ranked by predicted scores). Note that
multilabel classification case isn't handled here.
Parameters
----------
y_true : array-like of shape (n_samples,)
True labels.
y_score : array-like of shape (n_samples, n_classes)
Target scores.
k : int, optional (default=5)
Number of guesses allowed to find the correct label.
normalize : bool, optional (default=True)
If ``True``, return the fraction of correctly classified samples.
Otherwise, return the number of correctly classified samples.
Returns
-------
score : float
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score
Notes
-----
If ``k = 1``, the result will be the same as the accuracy_score (though see
note below). If ``k`` is the same as the number of classes, this score will
be perfect and meaningless.
In cases where two or more labels are assigned equal probabilities, the
result may be incorrect if one of those labels falls at the threshold, as
one class must be chosen to be the ``k``th class and the class chosen may
not be the correct one.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import top_k_accuracy_score
>>> y_true = np.array([0, 1, 2, 2])
>>> y_score = np.array([[0.5, 0.2, 0.2],
... [0.3, 0.4, 0.2],
... [0.2, 0.4, 0.3],
... [0.7, 0.2, 0.1]])
>>> top_k_accuracy_score(y_true, y_score, k=1)
0.5
>>> top_k_accuracy_score(y_true, y_score, k=2)
0.75
>>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False)
3
>>> top_k_accuracy_score(y_true, y_score, k=3)
1.0
"""
check_consistent_length(y_true, y_score)
y_type = type_of_target(y_true)
if y_type != 'multiclass':
raise ValueError(f"Target type must be 'multiclass' not {y_type}")
y_true = column_or_1d(y_true)
y_score = check_array(y_score)
classes = _encode(y_true)
if len(classes) != y_score.shape[1]:
raise ValueError(
"Number of classes in y_true not equal to the number of columns "
"in 'y_score'"
)
sorted_pred = np.argsort(-y_score, axis=1)
score = sum(y in pred[:k] for y, pred in zip(y_true, sorted_pred))
score = score / len(y_true) if normalize else score
return score
|
def top_k_accuracy_score(y_true, y_score, k=5, normalize=True):
"""Top k Accuracy classification score.
This metric computes the number of times where the correct label is among
the top ``k`` labels predicted (ranked by predicted scores). Note that
multilabel classification case isn't handled here.
Parameters
----------
y_true : array-like of shape (n_samples,)
True labels.
y_score : array-like of shape (n_samples, n_classes)
Target scores, as returned by :term:`decision_function` or :term:`predict_proba`
k : int, optional (default=5)
Number of guesses allowed to find the correct label.
normalize : bool, optional (default=True)
If ``True``, return the fraction of correctly classified samples.
Otherwise, return the number of correctly classified samples.
Returns
-------
score : float
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score
Notes
-----
If ``k = 1``, the result will be the same as the accuracy_score (though see
note below). If ``k`` is the same as the number of classes, this score will
be perfect and meaningless.
In cases where two or more labels are assigned equal probabilities, the
result may be incorrect if one of those labels falls at the threshold, as
one class must be chosen to be the ``k``th class and the class chosen may
not be the correct one.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import top_k_accuracy_score
>>> y_true = np.array([0, 1, 2, 2])
>>> y_score = np.array([[0.5, 0.2, 0.2],
... [0.3, 0.4, 0.2],
... [0.2, 0.4, 0.3],
... [0.7, 0.2, 0.1]])
>>> top_k_accuracy_score(y_true, y_score, k=1)
0.5
>>> top_k_accuracy_score(y_true, y_score, k=2)
0.75
>>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False)
3
>>> top_k_accuracy_score(y_true, y_score, k=3)
1.0
"""
check_consistent_length(y_true, y_score)
y_type = type_of_target(y_true)
if y_type != 'multiclass':
raise ValueError(f"Target type must be 'multiclass' not {y_type}")
y_true = column_or_1d(y_true)
y_score = check_array(y_score)
classes = _encode(y_true)
if len(classes) != y_score.shape[1]:
raise ValueError(
"Number of classes in y_true not equal to the number of columns "
"in 'y_score'"
)
sorted_pred = np.argsort(-y_score, axis=1)
score = sum(y in pred[:k] for y, pred in zip(y_true, sorted_pred))
score = score / len(y_true) if normalize else score
return score
|
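For illustration, the ranking step inside top_k_accuracy_score can be reproduced by hand with NumPy; this small sketch (not part of the original pair) recomputes the k=2 case from the docstring example and arrives at the same 0.75:

import numpy as np

y_true = np.array([0, 1, 2, 2])
y_score = np.array([[0.5, 0.2, 0.2],
                    [0.3, 0.4, 0.2],
                    [0.2, 0.4, 0.3],
                    [0.7, 0.2, 0.1]])

# Same ranking step the function uses: order class indices by descending
# score, then check whether the true label appears in the first k columns.
sorted_pred = np.argsort(-y_score, axis=1)
hits = [y in pred[:2] for y, pred in zip(y_true, sorted_pred)]
print(hits)                     # [True, True, True, False]
print(sum(hits) / len(y_true))  # 0.75, matching top_k_accuracy_score(..., k=2)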
2,664 |
def test_mnb_prior_unobserved_targets():
# test smoothing of prior for yet unobserved targets
# Create toy training data
X = np.array([[0, 1], [1, 0]])
y = np.array([0, 1])
clf = MultinomialNB()
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
clf.partial_fit(X, y, classes=[0, 1, 2])
assert clf.predict([[0, 1]]) == 0
assert clf.predict([[1, 0]]) == 1
assert clf.predict([[1, 1]]) == 0
# add a training example with previously unobserved class
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
clf.partial_fit([[1, 1]], [2])
assert clf.predict([[0, 1]]) == 0
assert clf.predict([[1, 0]]) == 1
assert clf.predict([[1, 1]]) == 2
|
def test_mnb_prior_unobserved_targets():
# test smoothing of prior for yet unobserved targets
# Create toy training data
X = np.array([[0, 1], [1, 0]])
y = np.array([0, 1])
clf = MultinomialNB()
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
clf.partial_fit(X, y, classes=[0, 1, 2])
assert clf.predict([[0, 1]]) == 0
assert clf.predict([[1, 0]]) == 1
assert clf.predict([[1, 1]]) == 0
# add a training example with previously unobserved class
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
clf.partial_fit([[1, 1]], [2])
assert clf.predict([[0, 1]]) == 0
assert clf.predict([[1, 0]]) == 1
assert clf.predict([[1, 1]]) == 2
|
33,617 |
def build_wsgi_environ(scope, body):
"""
Builds a scope and request body into a WSGI environ object.
This code snippet is taken from https://github.com/django/asgiref/blob
/36c3e8dc70bf38fe2db87ac20b514f21aaf5ea9d/asgiref/wsgi.py#L52
This function helps translate ASGI scope and body into a flask request.
"""
environ = {
"REQUEST_METHOD": scope["method"],
"SCRIPT_NAME": scope.get("root_path", ""),
"PATH_INFO": scope["path"],
"QUERY_STRING": scope["query_string"].decode("ascii"),
"SERVER_PROTOCOL": "HTTP/{}".format(scope["http_version"]),
"wsgi.version": (1, 0),
"wsgi.url_scheme": scope.get("scheme", "http"),
"wsgi.input": body,
"wsgi.errors": BytesIO(),
"wsgi.multithread": True,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
# Get server name and port - required in WSGI, not in ASGI
if "server" in scope:
environ["SERVER_NAME"] = scope["server"][0]
environ["SERVER_PORT"] = str(scope["server"][1])
else:
environ["SERVER_NAME"] = "localhost"
environ["SERVER_PORT"] = "80"
if "client" in scope:
environ["REMOTE_ADDR"] = scope["client"][0]
# Go through headers and make them into environ entries
for name, value in scope.get("headers", []):
name = name.decode("latin1")
if name == "content-length":
corrected_name = "CONTENT_LENGTH"
elif name == "content-type":
corrected_name = "CONTENT_TYPE"
else:
corrected_name = "HTTP_%s" % name.upper().replace("-", "_")
        # HTTPbis says only ASCII chars are allowed in headers, but we decode as latin1 just in case
value = value.decode("latin1")
if corrected_name in environ:
value = environ[corrected_name] + "," + value
environ[corrected_name] = value
return environ
|
def build_wsgi_environ(scope, body):
"""
Builds a scope and request body into a WSGI environ object.
This code snippet is taken from https://github.com/django/asgiref/blob
/36c3e8dc70bf38fe2db87ac20b514f21aaf5ea9d/asgiref/wsgi.py#L52
This function helps translate ASGI scope and body into a flask request.
"""
environ = {
"REQUEST_METHOD": scope["method"],
"SCRIPT_NAME": scope.get("root_path", ""),
"PATH_INFO": scope["path"],
"QUERY_STRING": scope["query_string"].decode("ascii"),
"SERVER_PROTOCOL": "HTTP/{}".format(scope["http_version"]),
"wsgi.version": (1, 0),
"wsgi.url_scheme": scope.get("scheme", "http"),
"wsgi.input": body,
"wsgi.errors": BytesIO(),
"wsgi.multithread": True,
"wsgi.multiprocess": True,
"wsgi.run_once": False,
}
# Get server name and port - required in WSGI, not in ASGI
if "server" in scope:
environ["SERVER_NAME"] = scope["server"][0]
environ["SERVER_PORT"] = str(scope["server"][1])
else:
environ["SERVER_NAME"] = "localhost"
environ["SERVER_PORT"] = "80"
if "client" in scope:
environ["REMOTE_ADDR"] = scope["client"][0]
# Transforms headers into environ entries.
for name, value in scope.get("headers", []):
name = name.decode("latin1")
if name == "content-length":
corrected_name = "CONTENT_LENGTH"
elif name == "content-type":
corrected_name = "CONTENT_TYPE"
else:
corrected_name = "HTTP_%s" % name.upper().replace("-", "_")
        # HTTPbis says only ASCII chars are allowed in headers, but we decode as latin1 just in case
value = value.decode("latin1")
if corrected_name in environ:
value = environ[corrected_name] + "," + value
environ[corrected_name] = value
return environ
|
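As an illustration of the translation above, a minimal hand-written ASGI HTTP scope (field names follow the ASGI spec; the concrete values are made up) can be converted like this:

from io import BytesIO

scope = {
    "method": "GET",
    "path": "/ping",
    "root_path": "",
    "query_string": b"verbose=1",
    "http_version": "1.1",
    "scheme": "http",
    "server": ("127.0.0.1", 8000),
    "client": ("127.0.0.1", 54321),
    "headers": [(b"content-type", b"application/json"),
                (b"x-request-id", b"abc123")],
}

environ = build_wsgi_environ(scope, BytesIO(b""))
assert environ["PATH_INFO"] == "/ping"
assert environ["CONTENT_TYPE"] == "application/json"
assert environ["HTTP_X_REQUEST_ID"] == "abc123"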
20,005 |
def run_sample_images(args):
plantcv.utils.sample_image(source_path=args.source, dest_path=args.outdir, num=args.number)
|
def run_sample_images(args):
plantcv.utils.sample_images(source_path=args.source, dest_path=args.outdir, num=args.number)
|
29,846 |
def _pack(
directory: str, *, compression: Optional[str] = None, output: Optional[str]
) -> None:
output_file = None
output_dir = None
# Output may be:
# (1) a directory path to output snaps to
# (2) an explicit file path to output snap to
# (3) unspecified (None), output to current directory (project directory)
if output:
output_path = pathlib.Path(output)
output_parent = output_path.parent
if output_path.is_dir():
output_dir = str(output_path)
elif output_parent and output_parent != pathlib.Path("."):
output_dir = str(output_parent)
output_file = output_path.name
else:
output_file = output
snap_path = file_utils.get_host_tool_path(command_name="snap", package_name="snapd")
command: List[Union[str, pathlib.Path]] = [snap_path, "pack"]
# When None, just use snap pack's default settings.
if compression is not None:
if compression != "xz":
echo.warning(
f"EXPERIMENTAL: Setting the squash FS compression to {compression!r}."
)
command.extend(["--compression", compression])
if output_file:
command.extend(["--filename", output_file])
command.append(directory)
if output_dir:
command.append(output_dir)
logger.debug(f"Running pack command: {command}")
snap_filename = _run_pack(command)
echo.info(f"Snapped {snap_filename}")
|
def _pack(
directory: str, *, compression: Optional[str] = None, output: Optional[str]
) -> None:
output_file = None
output_dir = None
# Output may be:
# (1) a directory path to output snaps to
# (2) an explicit file path to output snap to
# (3) unspecified (None), output to current directory (project directory)
if output:
output_path = pathlib.Path(output)
output_parent = output_path.parent
if output_path.is_dir():
output_dir = str(output_path)
elif output_parent and output_parent != pathlib.Path("."):
output_dir = str(output_parent)
output_file = output_path.name
else:
output_file = output
snap_path = file_utils.get_host_tool_path(command_name="snap", package_name="snapd")
command: List[Union[str, pathlib.Path]] = [snap_path, "pack"]
# When None, just use snap pack's default settings.
if compression is not None:
if compression != "xz":
echo.warning(
f"EXPERIMENTAL: Setting the squash FS compression to {compression!r}."
)
command.extend(["--compression", compression])
if output_file is not None:
command.extend(["--filename", output_file])
command.append(directory)
if output_dir:
command.append(output_dir)
logger.debug(f"Running pack command: {command}")
snap_filename = _run_pack(command)
echo.info(f"Snapped {snap_filename}")
|
33,050 |
def run_systemd_cmdline(args: MkosiArgs, commands: Sequence[str]) -> Sequence[str]:
return ["systemd-run", "--quiet", "--wait", "--pipe", "-M", machine_name(args), "/usr/bin/env", *commands]
|
def run_systemd_cmdline(args: MkosiArgs, commands: Sequence[str]) -> List[str]:
return ["systemd-run", "--quiet", "--wait", "--pipe", "-M", machine_name(args), "/usr/bin/env", *commands]
|
30,142 |
def dont_crash(msg, sign=False):
if sign:
while True:
try:
good_name = input(msg) # can't give an error. dab on the haiters
if good_name in ("+", "-", "/", "*"):
return good_name
else:
print("{0} not supported".fomrat(good_name))
except KeyboardInterrupt:
print("\nNo escape now!") # But still have to catch this one
else:
while True:
try:
return int(input(msg))
except KeyboardInterrupt:
print("\nNo escape now!")
except ValueError:
print("Are you smart enough to enter a number?! (I'm not)")
|
def dont_crash(msg, sign=False):
if sign:
while True:
try:
good_name = input(msg) # can't give an error. dab on the haiters
if good_name in ("+", "-", "/", "*"):
return good_name
else:
print("{0} not supported".format(good_name))
except KeyboardInterrupt:
print("\nNo escape now!") # But still have to catch this one
else:
while True:
try:
return int(input(msg))
except KeyboardInterrupt:
print("\nNo escape now!")
except ValueError:
print("Are you smart enough to enter a number?! (I'm not)")
|
56,452 |
def sql_placeholder_string(n: int) -> str:
"""
Return an SQL placeholder string of length n.
"""
return '(' + ','.join('?'*n) + ')'
|
def sql_placeholder_string(n: int) -> str:
"""
Return placeholder string for a SQL query for n values. E.g. for `n=5`
returns '(?,?,?,?,?)'.
"""
return '(' + ','.join('?'*n) + ')'
|
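A small usage sketch (the table and row below are hypothetical) shows how the placeholder string slots into a parameterised query; only the placeholders are interpolated into the SQL text, the values themselves are passed as parameters:

import sqlite3

row = (1, "alice", "alice@example.com")          # hypothetical row
placeholders = sql_placeholder_string(len(row))  # -> '(?,?,?)'

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (id, name, email)")
conn.execute(f"INSERT INTO users VALUES {placeholders}", row)
print(conn.execute("SELECT * FROM users").fetchall())
# [(1, 'alice', 'alice@example.com')]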
54,717 |
def test_str_split():
"Test Pandas string split method"
df = pd.DataFrame(
{"text": ["a_b_c", "c_d_e", np.nan, "f_g_h"], "numbers": range(1, 5)}
)
result = process_text(
df=df, column="text", string_function="split", pat="_"
)
expected = pd.DataFrame(
{
"text": [
["a", "b", "c"],
["c", "d", "e"],
np.nan,
["f", "g", "h"],
],
"numbers": [1, 2, 3, 4],
}
)
assert_frame_equal(result, expected)
|
def test_str_split():
"Test Pandas string split method"
df = pd.DataFrame(
{"text": ["a_b_c", "c_d_e", np.nan, "f_g_h"], "numbers": range(1, 5)}
)
result = df.process_text(
column="text", string_function="split", pat="_"
)
expected = pd.DataFrame(
{
"text": [
["a", "b", "c"],
["c", "d", "e"],
np.nan,
["f", "g", "h"],
],
"numbers": [1, 2, 3, 4],
}
)
assert_frame_equal(result, expected)
|
21,350 |
def _check_yield_points(f, changes, start_context):
"""Wraps a generator that is about to passed to defer.inlineCallbacks
checking that after every yield the log contexts are correct.
"""
from synapse.logging.context import LoggingContext
@functools.wraps(f)
def check_yield_points_inner(*args, **kwargs):
expected_context = start_context
gen = f(*args, **kwargs)
last_yield_line_no = 1
result = None
while True:
try:
isFailure = isinstance(result, Failure)
if isFailure:
d = result.throwExceptionIntoGenerator(gen)
else:
d = gen.send(result)
except (StopIteration, defer._DefGen_Return) as e:
if LoggingContext.current_context() != expected_context:
# This happens when the context is lost sometime *after* the
# final yield and returning. E.g. we forgot to yield on a
# function that returns a deferred.
err = (
"Function %r returned and changed context from %s to %s,"
" in %s between %d and end of func"
% (
f.__qualname__,
start_context,
LoggingContext.current_context(),
f.__code__.co_filename,
last_yield_line_no,
)
)
changes.append(err)
# raise Exception(err)
return getattr(e, "value", None)
try:
result = yield d
except Exception as e:
result = Failure(e)
frame = gen.gi_frame
if LoggingContext.current_context() != expected_context:
# This happens because the context is lost sometime *after* the
# previous yield and *after* the current yield. E.g. the
# deferred we waited on didn't follow the rules, or we forgot to
# yield on a function between the two yield points.
err = (
"%s changed context from %s to %s, happened between lines %d and %d in %s"
% (
frame.f_code.co_name,
start_context,
LoggingContext.current_context(),
last_yield_line_no,
frame.f_lineno,
frame.f_code.co_filename,
)
)
changes.append(err)
# raise Exception(err)
expected_context = LoggingContext.current_context()
last_yield_line_no = frame.f_lineno
return check_yield_points_inner
|
def _check_yield_points(f, changes, start_context):
"""Wraps a generator that is about to be passed to defer.inlineCallbacks
checking that after every yield the log contexts are correct.
"""
from synapse.logging.context import LoggingContext
@functools.wraps(f)
def check_yield_points_inner(*args, **kwargs):
expected_context = start_context
gen = f(*args, **kwargs)
last_yield_line_no = 1
result = None
while True:
try:
isFailure = isinstance(result, Failure)
if isFailure:
d = result.throwExceptionIntoGenerator(gen)
else:
d = gen.send(result)
except (StopIteration, defer._DefGen_Return) as e:
if LoggingContext.current_context() != expected_context:
# This happens when the context is lost sometime *after* the
# final yield and returning. E.g. we forgot to yield on a
# function that returns a deferred.
err = (
"Function %r returned and changed context from %s to %s,"
" in %s between %d and end of func"
% (
f.__qualname__,
start_context,
LoggingContext.current_context(),
f.__code__.co_filename,
last_yield_line_no,
)
)
changes.append(err)
# raise Exception(err)
return getattr(e, "value", None)
try:
result = yield d
except Exception as e:
result = Failure(e)
frame = gen.gi_frame
if LoggingContext.current_context() != expected_context:
# This happens because the context is lost sometime *after* the
# previous yield and *after* the current yield. E.g. the
# deferred we waited on didn't follow the rules, or we forgot to
# yield on a function between the two yield points.
err = (
"%s changed context from %s to %s, happened between lines %d and %d in %s"
% (
frame.f_code.co_name,
start_context,
LoggingContext.current_context(),
last_yield_line_no,
frame.f_lineno,
frame.f_code.co_filename,
)
)
changes.append(err)
# raise Exception(err)
expected_context = LoggingContext.current_context()
last_yield_line_no = frame.f_lineno
return check_yield_points_inner
|
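A hypothetical usage sketch (the generator below is illustrative, and it assumes the same synapse/Twisted environment as the code above): wrap the generator first, then apply defer.inlineCallbacks, and inspect `changes` afterwards for any recorded logcontext leaks.

from twisted.internet import defer
from synapse.logging.context import LoggingContext

def my_generator_fn():
    # Illustrative inlineCallbacks-style generator.
    value = yield defer.succeed(41)
    return value + 1

changes = []
start_context = LoggingContext.current_context()

wrapped = defer.inlineCallbacks(
    _check_yield_points(my_generator_fn, changes, start_context)
)
d = wrapped()  # fires with 42; `changes` lists any yield points that leaked context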
31,033 |
def main():
""" Main Function"""
try:
LOG('Command is %s' % (demisto.command(),))
global ACCESS_TOKEN, REFRESH_TOKEN
ACCESS_TOKEN = demisto.params().get('access_token')
REFRESH_TOKEN = demisto.params().get('refresh_token')
if demisto.command() == 'get-dlp-report':
report_id = demisto.args().get('report_id')
fetch_snippets = demisto.args().get('fetch_snippets', 'false') == 'true'
report_json, status_code = get_dlp_report(report_id, fetch_snippets)
parse_dlp_report(report_json)
if demisto.command() == "test-module":
test()
except Exception as e:
demisto.debug('Unknown Command')
error_message = str(e)
return_error(error_message)
finally:
LOG.print_log()
|
def main():
""" Main Function"""
try:
demisto.info('Command is %s' % (demisto.command(),))
global ACCESS_TOKEN, REFRESH_TOKEN
ACCESS_TOKEN = demisto.params().get('access_token')
REFRESH_TOKEN = demisto.params().get('refresh_token')
if demisto.command() == 'get-dlp-report':
report_id = demisto.args().get('report_id')
fetch_snippets = demisto.args().get('fetch_snippets', 'false') == 'true'
report_json, status_code = get_dlp_report(report_id, fetch_snippets)
parse_dlp_report(report_json)
if demisto.command() == "test-module":
test()
except Exception as e:
demisto.debug('Unknown Command')
error_message = str(e)
return_error(error_message)
finally:
LOG.print_log()
|
13,800 |
def check_verify_status_by_course(user, course_enrollments):
"""
Determine the per-course verification statuses for a given user.
The possible statuses are:
* VERIFY_STATUS_NEED_TO_VERIFY: The student has not yet submitted photos for verification.
* VERIFY_STATUS_SUBMITTED: The student has submitted photos for verification,
      but has not yet been approved.
* VERIFY_STATUS_RESUBMITTED: The student has re-submitted photos for re-verification while
they still have an active but expiring ID verification
* VERIFY_STATUS_APPROVED: The student has been successfully verified.
* VERIFY_STATUS_MISSED_DEADLINE: The student did not submit photos within the course's deadline.
* VERIFY_STATUS_NEED_TO_REVERIFY: The student has an active verification, but it is
set to expire before the verification deadline for the course.
    It is also possible that a course does NOT have a verification status if:
* The user is not enrolled in a verified mode, meaning that the user didn't pay.
* The course does not offer a verified mode.
* The user submitted photos but an error occurred while verifying them.
* The user submitted photos but the verification was denied.
In the last two cases, we rely on messages in the sidebar rather than displaying
messages for each course.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): The courses the user is enrolled in.
Returns:
dict: Mapping of course keys to verification status dictionaries.
If no verification status is applicable to a course, it will not
be included in the dictionary.
The dictionaries have these keys:
* status (str): One of the enumerated status codes.
* days_until_deadline (int): Number of days until the verification deadline.
* verification_good_until (str): Date string for the verification expiration date.
"""
status_by_course = {}
# Before retrieving verification data, if the integrity_signature feature is enabled for the course,
# bypass all of the logic below. Filter down to those courses with integrity_signature not enabled.
enabled_course_enrollments = []
for enrollment in course_enrollments:
if not is_integrity_signature_enabled(enrollment.course_id):
enabled_course_enrollments.append(enrollment)
if len(enabled_course_enrollments) == 0:
return status_by_course
# Retrieve all verifications for the user, sorted in descending
# order by submission datetime
verifications = IDVerificationService.verifications_for_user(user)
# Check whether the user has an active or pending verification attempt
has_active_or_pending = IDVerificationService.user_has_valid_or_pending(user)
# Retrieve expiration_datetime of most recent approved verification
expiration_datetime = IDVerificationService.get_expiration_datetime(user, ['approved'])
verification_expiring_soon = is_verification_expiring_soon(expiration_datetime)
# Retrieve verification deadlines for the enrolled courses
course_deadlines = VerificationDeadline.deadlines_for_enrollments(
CourseEnrollment.enrollments_for_user(user)
)
recent_verification_datetime = None
for enrollment in enabled_course_enrollments:
# If the user hasn't enrolled as verified, then the course
# won't display state related to its verification status.
if enrollment.mode in CourseMode.VERIFIED_MODES:
# Retrieve the verification deadline associated with the course.
# This could be None if the course doesn't have a deadline.
deadline = course_deadlines.get(enrollment.course_id)
relevant_verification = verification_for_datetime(deadline, verifications)
# Picking the max verification datetime on each iteration only with approved status
if relevant_verification is not None and relevant_verification.status == "approved":
recent_verification_datetime = max(
recent_verification_datetime if recent_verification_datetime is not None
else relevant_verification.expiration_datetime,
relevant_verification.expiration_datetime
)
# By default, don't show any status related to verification
status = None
should_display = True
# Check whether the user was approved or is awaiting approval
if relevant_verification is not None:
should_display = relevant_verification.should_display_status_to_user()
if relevant_verification.status == "approved":
if verification_expiring_soon:
status = VERIFY_STATUS_NEED_TO_REVERIFY
else:
status = VERIFY_STATUS_APPROVED
elif relevant_verification.status == "submitted":
if verification_expiring_soon:
status = VERIFY_STATUS_RESUBMITTED
else:
status = VERIFY_STATUS_SUBMITTED
# If the user didn't submit at all, then tell them they need to verify
# If the deadline has already passed, then tell them they missed it.
# If they submitted but something went wrong (error or denied),
# then don't show any messaging next to the course, since we already
# show messages related to this on the left sidebar.
submitted = (
relevant_verification is not None and
relevant_verification.status not in ["created", "ready"]
)
if status is None and not submitted:
if deadline is None or deadline > datetime.now(UTC):
if IDVerificationService.user_is_verified(user) and verification_expiring_soon:
# The user has an active verification, but the verification
# is set to expire within "EXPIRING_SOON_WINDOW" days (default is 4 weeks).
# Tell the student to reverify.
status = VERIFY_STATUS_NEED_TO_REVERIFY
elif not IDVerificationService.user_is_verified(user):
status = VERIFY_STATUS_NEED_TO_VERIFY
else:
# If a user currently has an active or pending verification,
# then they may have submitted an additional attempt after
# the verification deadline passed. This can occur,
# for example, when the support team asks a student
# to reverify after the deadline so they can receive
# a verified certificate.
# In this case, we still want to show them as "verified"
# on the dashboard.
if has_active_or_pending:
status = VERIFY_STATUS_APPROVED
# Otherwise, the student missed the deadline, so show
# them as "honor" (the kind of certificate they will receive).
else:
status = VERIFY_STATUS_MISSED_DEADLINE
# Set the status for the course only if we're displaying some kind of message
# Otherwise, leave the course out of the dictionary.
if status is not None:
days_until_deadline = None
now = datetime.now(UTC)
if deadline is not None and deadline > now:
days_until_deadline = (deadline - now).days
status_by_course[enrollment.course_id] = {
'status': status,
'days_until_deadline': days_until_deadline,
'should_display': should_display,
}
if recent_verification_datetime:
for key, value in status_by_course.items(): # pylint: disable=unused-variable
status_by_course[key]['verification_good_until'] = recent_verification_datetime.strftime("%m/%d/%Y")
return status_by_course
|
def check_verify_status_by_course(user, course_enrollments):
"""
Determine the per-course verification statuses for a given user.
The possible statuses are:
* VERIFY_STATUS_NEED_TO_VERIFY: The student has not yet submitted photos for verification.
* VERIFY_STATUS_SUBMITTED: The student has submitted photos for verification,
but has not yet been approved.
* VERIFY_STATUS_RESUBMITTED: The student has re-submitted photos for re-verification while
they still have an active but expiring ID verification
* VERIFY_STATUS_APPROVED: The student has been successfully verified.
* VERIFY_STATUS_MISSED_DEADLINE: The student did not submit photos within the course's deadline.
* VERIFY_STATUS_NEED_TO_REVERIFY: The student has an active verification, but it is
set to expire before the verification deadline for the course.
It is also possible that a course does NOT have a verification status if:
* The user is not enrolled in a verified mode, meaning that the user didn't pay.
* The course does not offer a verified mode.
* The user submitted photos but an error occurred while verifying them.
* The user submitted photos but the verification was denied.
In the last two cases, we rely on messages in the sidebar rather than displaying
messages for each course.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): The courses the user is enrolled in.
Returns:
dict: Mapping of course keys to verification status dictionaries.
If no verification status is applicable to a course, it will not
be included in the dictionary.
The dictionaries have these keys:
* status (str): One of the enumerated status codes.
* days_until_deadline (int): Number of days until the verification deadline.
* verification_good_until (str): Date string for the verification expiration date.
"""
status_by_course = {}
# Before retrieving verification data, if the integrity_signature feature is enabled for the course,
# bypass all of the logic below. Filter down to those courses with integrity_signature not enabled.
enabled_course_enrollments = []
for enrollment in course_enrollments:
if not is_integrity_signature_enabled(enrollment.course_id):
enabled_course_enrollments.append(enrollment)
if len(enabled_course_enrollments) == 0:
return status_by_course
# Retrieve all verifications for the user, sorted in descending
# order by submission datetime
verifications = IDVerificationService.verifications_for_user(user)
# Check whether the user has an active or pending verification attempt
has_active_or_pending = IDVerificationService.user_has_valid_or_pending(user)
# Retrieve expiration_datetime of most recent approved verification
expiration_datetime = IDVerificationService.get_expiration_datetime(user, ['approved'])
verification_expiring_soon = is_verification_expiring_soon(expiration_datetime)
# Retrieve verification deadlines for the enrolled courses
course_deadlines = VerificationDeadline.deadlines_for_enrollments(
CourseEnrollment.enrollments_for_user(user)
)
recent_verification_datetime = None
for enrollment in enabled_course_enrollments:
# If the user hasn't enrolled as verified, then the course
# won't display state related to its verification status.
if enrollment.mode in CourseMode.VERIFIED_MODES:
# Retrieve the verification deadline associated with the course.
# This could be None if the course doesn't have a deadline.
deadline = course_deadlines.get(enrollment.course_id)
relevant_verification = verification_for_datetime(deadline, verifications)
# Picking the max verification datetime on each iteration only with approved status
if relevant_verification is not None and relevant_verification.status == "approved":
recent_verification_datetime = max(
recent_verification_datetime if recent_verification_datetime is not None
else relevant_verification.expiration_datetime,
relevant_verification.expiration_datetime
)
# By default, don't show any status related to verification
status = None
should_display = True
# Check whether the user was approved or is awaiting approval
if relevant_verification is not None:
should_display = relevant_verification.should_display_status_to_user()
if relevant_verification.status == "approved":
if verification_expiring_soon:
status = VERIFY_STATUS_NEED_TO_REVERIFY
else:
status = VERIFY_STATUS_APPROVED
elif relevant_verification.status == "submitted":
if verification_expiring_soon:
status = VERIFY_STATUS_RESUBMITTED
else:
status = VERIFY_STATUS_SUBMITTED
# If the user didn't submit at all, then tell them they need to verify
# If the deadline has already passed, then tell them they missed it.
# If they submitted but something went wrong (error or denied),
# then don't show any messaging next to the course, since we already
# show messages related to this on the left sidebar.
submitted = (
relevant_verification is not None and
relevant_verification.status not in ["created", "ready"]
)
if status is None and not submitted:
if deadline is None or deadline > datetime.now(UTC):
if IDVerificationService.user_is_verified(user) and verification_expiring_soon:
# The user has an active verification, but the verification
# is set to expire within "EXPIRING_SOON_WINDOW" days (default is 4 weeks).
# Tell the student to reverify.
status = VERIFY_STATUS_NEED_TO_REVERIFY
elif not IDVerificationService.user_is_verified(user):
status = VERIFY_STATUS_NEED_TO_VERIFY
else:
# If a user currently has an active or pending verification,
# then they may have submitted an additional attempt after
# the verification deadline passed. This can occur,
# for example, when the support team asks a student
# to reverify after the deadline so they can receive
# a verified certificate.
# In this case, we still want to show them as "verified"
# on the dashboard.
if has_active_or_pending:
status = VERIFY_STATUS_APPROVED
# Otherwise, the student missed the deadline, so show
# them as "honor" (the kind of certificate they will receive).
else:
status = VERIFY_STATUS_MISSED_DEADLINE
# Set the status for the course only if we're displaying some kind of message
# Otherwise, leave the course out of the dictionary.
if status is not None:
days_until_deadline = None
now = datetime.now(UTC)
if deadline is not None and deadline > now:
days_until_deadline = (deadline - now).days
status_by_course[enrollment.course_id] = {
'status': status,
'days_until_deadline': days_until_deadline,
'should_display': should_display,
}
if recent_verification_datetime:
for key, value in status_by_course.items(): # pylint: disable=unused-variable
status_by_course[key]['verification_good_until'] = recent_verification_datetime.strftime("%m/%d/%Y")
return status_by_course
|
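A standalone sketch of the deadline arithmetic used in both copies above, with the standard library's timezone.utc standing in for the UTC object imported elsewhere in the module:
from datetime import datetime, timedelta, timezone

def days_until(deadline, now=None):
    now = now or datetime.now(timezone.utc)
    if deadline is None or deadline <= now:
        return None
    return (deadline - now).days

deadline = datetime.now(timezone.utc) + timedelta(days=10)
print(days_until(deadline))  # 9, since a fraction of a second has already elapsed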
54,226 |
def assert_optimizes(before: cirq.Circuit, expected: cirq.Circuit):
actual = cirq.Circuit(before)
opt = cirq.MergeInteractions()
opt.optimize_circuit(actual)
# Ignore differences that would be caught by follow-up optimizations.
followup_optimizations: List[Callable[[cirq.Circuit], None]] = [
cirq.merge_single_qubit_gates_into_phased_x_z,
cirq.EjectPhasedPaulis().optimize_circuit,
cirq.EjectZ().optimize_circuit,
]
for post in followup_optimizations:
post(actual)
post(expected)
followup_optimizations_new: List[cirq.TRANSFORMER] = [
cirq.drop_negligible_operations,
cirq.drop_empty_moments,
]
for post_new in followup_optimizations_new:
actual = post_new(actual).unfreeze(copy=False)
expected = post_new(expected).unfreeze(copy=False)
assert actual == expected, f'ACTUAL {actual} : EXPECTED {expected}'
|
def assert_optimizes(before: cirq.Circuit, expected: cirq.Circuit):
actual = cirq.Circuit(before)
opt = cirq.MergeInteractions()
opt.optimize_circuit(actual)
# Ignore differences that would be caught by follow-up optimizations.
followup_optimizations: List[Callable[[cirq.Circuit], None]] = [
cirq.merge_single_qubit_gates_into_phased_x_z,
cirq.EjectPhasedPaulis().optimize_circuit,
cirq.EjectZ().optimize_circuit,
]
for post in followup_optimizations:
post(actual)
post(expected)
followup_transformers: List[cirq.TRANSFORMER] = [
cirq.drop_negligible_operations,
cirq.drop_empty_moments,
]
for transform in followup_transformers:
actual = transform(actual).unfreeze(copy=False)
expected = transform(expected).unfreeze(copy=False)
assert actual == expected, f'ACTUAL {actual} : EXPECTED {expected}'
|
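A hypothetical call to the helper above, assuming a cirq version in which MergeInteractions and the listed transformers are available; a pair of identical CNOTs should optimize away to an empty circuit.
import cirq

a, b = cirq.LineQubit.range(2)
before = cirq.Circuit([cirq.CNOT(a, b), cirq.CNOT(a, b)])
assert_optimizes(before, expected=cirq.Circuit())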
30,382 |
def create_group():
assert conn is not None
args = demisto.args()
object_classes = ["top", "group"]
group_cn = args.get('name')
dn = args.get('dn')
group_name = args.get("name")
group_class = args.get("group-class")
sam_account_name = args.get('name')
group_type_map = {"security": "2147483650", "distribution":"2"}
group_type = group_type_map[args.get("group-type")]
if args.get('members'):
members = args.get('members')
attributes = {
"samAccountName": group_name,
"groupType": group_type,
"member": members
}
else:
attributes = {
"samAccountName": group_name,
"groupType": group_type
}
# create group
success = conn.add(dn, object_classes, attributes)
if not success:
raise Exception("Failed to create group")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Created group with DN: {}".format(dn)
}
demisto.results(demisto_entry)
|
def create_group():
assert conn is not None
args = demisto.args()
object_classes = ["top", "group"]
group_cn = args.get('name')
dn = args.get('dn')
group_name = args.get("name")
group_class = args.get("group-class")
sam_account_name = args.get('name')
group_type_map = {"security": "2147483650", "distribution": "2"}
group_type = group_type_map[args.get("group-type")]
if args.get('members'):
members = args.get('members')
attributes = {
"samAccountName": group_name,
"groupType": group_type,
"member": members
}
else:
attributes = {
"samAccountName": group_name,
"groupType": group_type
}
# create group
success = conn.add(dn, object_classes, attributes)
if not success:
raise Exception("Failed to create group")
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': "Created group with DN: {}".format(dn)
}
demisto.results(demisto_entry)
|
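create_group() relies on a module-level conn object. A hedged sketch of how such a connection is typically opened with ldap3; the host and bind credentials below are placeholders.
from ldap3 import Server, Connection, ALL

server = Server("ldaps://dc.example.com", get_info=ALL)
conn = Connection(server, user="EXAMPLE\\administrator", password="<password>", auto_bind=True)
# conn.add(dn, object_classes, attributes) returns True on success,
# which is exactly what create_group() checks before raising.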
37,185 |
def _get_credentials():
"""Finds the credentials for a specific test and options.
Returns:
Credentials: set of credentials
Raises:
SkipTest: when credentials can't be found
"""
try:
from qiskit.providers.ibmq.credentials import (Credentials,
discover_credentials)
except ImportError:
raise unittest.SkipTest('qiskit-ibmq-provider could not be found, '
'and is required for executing online tests.'
'Install qiskit-ibmq-provider or check your '
'installation.')
if os.getenv('IBMQ_TOKEN') and os.getenv('IBMQ_URL'):
return Credentials(os.getenv('IBMQ_TOKEN'), os.getenv('IBMQ_URL'))
elif os.getenv('QISKIT_TESTS_USE_CREDENTIALS_FILE'):
# Attempt to read the standard credentials.
discovered_credentials = discover_credentials()
if discovered_credentials:
# Decide which credentials to use for testing.
if len(discovered_credentials) > 1:
raise unittest.SkipTest(
"More than 1 credential set found, use: "
"IBMQ_TOKEN and IBMQ_URL env variables to "
"set credentials explicitly")
# Use the first available credentials.
return list(discovered_credentials.values())[0]
raise unittest.SkipTest(
'No IBMQ credentials found for running the test. This is required for '
'running online tests.')
|
def _get_credentials():
"""Finds the credentials for a specific test and options.
Returns:
Credentials: set of credentials
Raises:
SkipTest: when credentials can't be found
"""
try:
from qiskit.providers.ibmq.credentials import (Credentials,
discover_credentials)
except ImportError:
raise unittest.SkipTest('qiskit-ibmq-provider could not be found, '
'and is required for executing online tests. '
'Install qiskit-ibmq-provider or check your '
'installation.')
if os.getenv('IBMQ_TOKEN') and os.getenv('IBMQ_URL'):
return Credentials(os.getenv('IBMQ_TOKEN'), os.getenv('IBMQ_URL'))
elif os.getenv('QISKIT_TESTS_USE_CREDENTIALS_FILE'):
# Attempt to read the standard credentials.
discovered_credentials = discover_credentials()
if discovered_credentials:
# Decide which credentials to use for testing.
if len(discovered_credentials) > 1:
raise unittest.SkipTest(
"More than 1 credential set found, use: "
"IBMQ_TOKEN and IBMQ_URL env variables to "
"set credentials explicitly")
# Use the first available credentials.
return list(discovered_credentials.values())[0]
raise unittest.SkipTest(
'No IBMQ credentials found for running the test. This is required for '
'running online tests.')
|
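For reference, the environment variables the helper above looks for can be exported before running the online tests; the values here are placeholders only.
import os

os.environ["IBMQ_TOKEN"] = "<your-ibmq-token>"
os.environ["IBMQ_URL"] = "<your-ibmq-api-url>"
# or, to fall back to a stored credentials file instead:
os.environ["QISKIT_TESTS_USE_CREDENTIALS_FILE"] = "1"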
30,827 |
def get_security_profiles_command():
"""
Get information about profiles.
"""
security_profile = demisto.args().get('security_profile')
if security_profile:
xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
else:
xpath = f'{XPATH_RULEBASE}profiles'
result = get_security_profile(xpath)
if security_profile:
security_profiles = result.get('response', {}).get('result', {})
else:
security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
if '@dirtyId' in security_profiles:
LOG(f'Found uncommitted item:\n{security_profiles}')
raise Exception('Please commit the instance prior to getting the security profiles.')
human_readable = ''
content: List[Dict[str, Any]] = []
context = {}
if 'spyware' in security_profiles:
profiles = security_profiles.get('spyware').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': spyware_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': spyware_rules
}
human_readable = tableToMarkdown('Anti Spyware Profiles', content)
context.update({"Panorama.Spyware(val.Name == obj.Name)": content})
if 'virus' in security_profiles:
profiles = security_profiles.get('virus').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Decoder': antivirus_rules
})
else:
rules = profiles.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': antivirus_rules
}
human_readable += tableToMarkdown('Antivirus Profiles', content)
context.update({"Panorama.Antivirus(val.Name == obj.Name)": content})
if 'file-blocking' in security_profiles:
profiles = security_profiles.get('file-blocking').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': file_blocking_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': file_blocking_rules
}
human_readable += tableToMarkdown('File Blocking Profiles', content)
context.update({"Panorama.FileBlocking(val.Name == obj.Name)": content})
if 'vulnerability' in security_profiles:
profiles = security_profiles.get('vulnerability').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': vulnerability_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': vulnerability_rules
}
human_readable += tableToMarkdown('Vulnerability Protection Profiles', content)
context.update({"Panorama.Vulnerability(val.Name == obj.Name)": content})
if 'data-filtering' in security_profiles:
profiles = security_profiles.get('data-filtering').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': data_filtering_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': data_filtering_rules
}
human_readable += tableToMarkdown('Data Filtering Profiles', content)
context.update({"Panorama.DataFiltering(val.Name == obj.Name)": content})
if 'url-filtering' in security_profiles:
profiles = security_profiles.get('url-filtering').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
url_filtering_rules = prettify_get_url_filter(profile)
content.append({
'Name': profile['@name'],
'Rules': url_filtering_rules
})
else:
url_filtering_rules = prettify_get_url_filter(profiles)
content = {
'Name': profiles['@name'],
'Rules': url_filtering_rules
}
human_readable += tableToMarkdown('URL Filtering Profiles', content)
context.update({"Panorama.URLFilter(val.Name == obj.Name)": content})
if 'wildfire-analysis' in security_profiles:
profiles = security_profiles.get('wildfire-analysis').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': wildfire_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': wildfire_rules
}
human_readable += tableToMarkdown('WildFire Profiles', content)
context.update({"Panorama.WildFire(val.Name == obj.Name)": content})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': context
})
|
def get_security_profiles_command():
"""
Get information about profiles.
"""
security_profile = demisto.args().get('security_profile')
if security_profile:
xpath = f'{XPATH_RULEBASE}profiles/{security_profile}'
else:
xpath = f'{XPATH_RULEBASE}profiles'
result = get_security_profile(xpath)
if security_profile:
security_profiles = result.get('response', {}).get('result', {})
else:
security_profiles = result.get('response', {}).get('result', {}).get('profiles', {})
if '@dirtyId' in security_profiles:
LOG(f'Found uncommitted item:\n{security_profiles}')
raise Exception('Please commit the instance prior to getting the security profiles.')
human_readable = ''
content: List[Dict[str, Any]] = []
context = {}
if 'spyware' in security_profiles:
profiles = security_profiles.get('spyware').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': spyware_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
spyware_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': spyware_rules
}
human_readable = tableToMarkdown('Anti Spyware Profiles', content)
context.update({"Panorama.Spyware(val.Name == obj.Name)": content})
if 'virus' in security_profiles:
profiles = security_profiles.get('virus', {}).get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Decoder': antivirus_rules
})
else:
rules = profiles.get('decoder', {}).get('entry', [])
antivirus_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': antivirus_rules
}
human_readable += tableToMarkdown('Antivirus Profiles', content)
context.update({"Panorama.Antivirus(val.Name == obj.Name)": content})
if 'file-blocking' in security_profiles:
profiles = security_profiles.get('file-blocking').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': file_blocking_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
file_blocking_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': file_blocking_rules
}
human_readable += tableToMarkdown('File Blocking Profiles', content)
context.update({"Panorama.FileBlocking(val.Name == obj.Name)": content})
if 'vulnerability' in security_profiles:
profiles = security_profiles.get('vulnerability').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': vulnerability_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
vulnerability_rules = prettify_profiles_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': vulnerability_rules
}
human_readable += tableToMarkdown('Vulnerability Protection Profiles', content)
context.update({"Panorama.Vulnerability(val.Name == obj.Name)": content})
if 'data-filtering' in security_profiles:
profiles = security_profiles.get('data-filtering').get('entry', {})
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': data_filtering_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
data_filtering_rules = prettify_data_filtering_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': data_filtering_rules
}
human_readable += tableToMarkdown('Data Filtering Profiles', content)
context.update({"Panorama.DataFiltering(val.Name == obj.Name)": content})
if 'url-filtering' in security_profiles:
profiles = security_profiles.get('url-filtering').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
url_filtering_rules = prettify_get_url_filter(profile)
content.append({
'Name': profile['@name'],
'Rules': url_filtering_rules
})
else:
url_filtering_rules = prettify_get_url_filter(profiles)
content = {
'Name': profiles['@name'],
'Rules': url_filtering_rules
}
human_readable += tableToMarkdown('URL Filtering Profiles', content)
context.update({"Panorama.URLFilter(val.Name == obj.Name)": content})
if 'wildfire-analysis' in security_profiles:
profiles = security_profiles.get('wildfire-analysis').get('entry', [])
if isinstance(profiles, list):
for profile in profiles:
rules = profile.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content.append({
'Name': profile['@name'],
'Rules': wildfire_rules
})
else:
rules = profiles.get('rules', {}).get('entry', [])
wildfire_rules = prettify_wildfire_rules(rules)
content = {
'Name': profiles['@name'],
'Rules': wildfire_rules
}
human_readable += tableToMarkdown('WildFire Profiles', content)
context.update({"Panorama.WildFire(val.Name == obj.Name)": content})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': context
})
|
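Both versions above repeat one normalization idiom: an 'entry' node parsed from the firewall's XML is a dict when there is a single profile and a list when there are several. A small standalone helper (not part of the integration) makes the pattern explicit:
def ensure_list(entry):
    """Return XML 'entry' content as a list, whether it held one item or many."""
    if entry is None:
        return []
    return entry if isinstance(entry, list) else [entry]

print(ensure_list({"@name": "default"}))               # [{'@name': 'default'}]
print(ensure_list([{"@name": "a"}, {"@name": "b"}]))   # list returned unchanged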
34,411 |
def _concat_entity_labels(
token: Token, entities: List[Dict], extractors: Optional[Set[Text]] = None
):
"""Concatenate labels for entity type, role, and group for evaluation.
In order to calculate metrics also for entity type, role, and group we need to
concatenate their labels. For example, 'location.destination'. This allows
us to report metrics for every combination of entity type, role, and group.
Args:
token: the token we are looking at
entities: the available entities
extractors: the extractor of interest
Returns:
the entity label of the provided token
"""
entity_label = determine_token_labels(
token, entities, extractors, ENTITY_ATTRIBUTE_TYPE
)
group_label = determine_token_labels(
token, entities, extractors, ENTITY_ATTRIBUTE_GROUP
)
role_label = determine_token_labels(
token, entities, extractors, ENTITY_ATTRIBUTE_ROLE
)
if entity_label == role_label == group_label == NO_ENTITY_TAG:
return NO_ENTITY_TAG
labels = [entity_label, group_label, role_label]
labels = [label for label in labels if label != NO_ENTITY_TAG]
return ".".join(labels)
|
def _concat_entity_labels(
token: Token, entities: List[Dict], extractors: Optional[Set[Text]] = None
) -> Text:
"""Concatenate labels for entity type, role, and group for evaluation.
In order to calculate metrics also for entity type, role, and group we need to
concatenate their labels. For example, 'location.destination'. This allows
us to report metrics for every combination of entity type, role, and group.
Args:
token: the token we are looking at
entities: the available entities
extractors: the extractor of interest
Returns:
the entity label of the provided token
"""
entity_label = determine_token_labels(
token, entities, extractors, ENTITY_ATTRIBUTE_TYPE
)
group_label = determine_token_labels(
token, entities, extractors, ENTITY_ATTRIBUTE_GROUP
)
role_label = determine_token_labels(
token, entities, extractors, ENTITY_ATTRIBUTE_ROLE
)
if entity_label == role_label == group_label == NO_ENTITY_TAG:
return NO_ENTITY_TAG
labels = [entity_label, group_label, role_label]
labels = [label for label in labels if label != NO_ENTITY_TAG]
return ".".join(labels)
|
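A pure-Python illustration of the concatenation rule implemented above, assuming the conventional "O" value for NO_ENTITY_TAG:
NO_ENTITY_TAG = "O"

def concat_labels(entity_label, group_label, role_label):
    labels = [entity_label, group_label, role_label]
    labels = [label for label in labels if label != NO_ENTITY_TAG]
    return ".".join(labels) if labels else NO_ENTITY_TAG

print(concat_labels("location", "O", "destination"))  # location.destination
print(concat_labels("O", "O", "O"))                   # O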
516 |
def _can_delete_saved_report(report, user, domain):
if domain != report.domain:
return False
return user._id == report.owner_id
|
def _can_delete_saved_report(report, user, domain):
return domain == report.domain and user._id == report.owner_id
|
14,539 |
def validate_nslr_data(eye_positions: np.ndarray, eye_timestamps: np.ndarray):
def has_nan(arr: np.ndarray):
return np.any(np.isnan(arr))
def is_monotonic(arr: np.ndarray):
return np.all(arr[:-1] <= arr[1:])
def is_unique(arr: np.ndarray):
return arr.shape == np.unique(arr, axis=0).shape
if has_nan(eye_positions):
raise ValueError("Gaze data contains NaN values")
if has_nan(eye_timestamps):
raise ValueError("Gaze timestamps contains NaN values")
if not is_monotonic(eye_timestamps):
raise ValueError("Gaze timestamps are not monotonic")
if not is_unique(eye_timestamps):
raise ValueError("Gaze timestamps are not unique. Please recalculate gaze mapping with only 1 mapper enabled")
|
def validate_nslr_data(eye_positions: np.ndarray, eye_timestamps: np.ndarray):
def has_nan(arr: np.ndarray):
return np.any(np.isnan(arr))
def is_monotonic(arr: np.ndarray):
return np.all(arr[:-1] <= arr[1:])
def is_unique(arr: np.ndarray):
return arr.shape == np.unique(arr, axis=0).shape
if has_nan(eye_positions):
raise ValueError("Gaze data contains NaN values")
if has_nan(eye_timestamps):
raise ValueError("Gaze timestamps contain NaN values")
if not is_monotonic(eye_timestamps):
raise ValueError("Gaze timestamps are not monotonic")
if not is_unique(eye_timestamps):
raise ValueError("Gaze timestamps are not unique. Please recalculate gaze mapping with only 1 mapper enabled")
|
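A small numpy demonstration of the three checks the validator performs: NaN values, monotonic timestamps, and unique timestamps.
import numpy as np

positions = np.array([[0.1, 0.2], [0.3, np.nan]])
timestamps = np.array([0.0, 1.0, 1.0, 0.5])

print(np.any(np.isnan(positions)))                              # True  -> NaNs present
print(np.all(timestamps[:-1] <= timestamps[1:]))                # False -> not monotonic
print(timestamps.shape == np.unique(timestamps, axis=0).shape)  # False -> duplicates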
57,946 |
def send_mail(args: dict, sg_from_email: str, sg_sender_name: str, sg):
message = Mail() # type: ignore[name-defined]
attach_ids = args.get('AttachIDs')
attach_names = args.get('AttachNames') or ""
if attach_ids:
process_attachments(message, attach_ids, attach_names)
categories = args.get('Categories')
if categories:
categories = categories.split(",")
for category in categories:
message.category = Category(category) # type: ignore[name-defined]
batch_id = args.get('BatchID')
if batch_id:
message.batch_id = BatchId(batch_id) # type: ignore[name-defined]
send_at = args.get('SendAt')
if send_at:
t = dateutil.parser.parse(send_at)
send_time = time.mktime(t.timetuple())
message.send_at = SendAt(int(send_time)) # type: ignore[name-defined]
asm = args.get('Asm')
if asm:
asm = asm if type(asm) is dict else json.loads(asm)
message.asm = Asm(GroupId(asm["group_id"]), GroupsToDisplay(asm["groups_to_display"])) # type: ignore[name-defined]
custom_args = args.get('CustomArgs')
if custom_args:
custom_args = custom_args if type(custom_args) is dict else json.loads(custom_args)
for key in custom_args:
message.custom_arg = CustomArg(key, custom_args[key]) # type: ignore[name-defined]
ip_pool_name = args.get('IPPoolName')
if ip_pool_name:
message.ip_pool_name = IpPoolName(ip_pool_name) # type: ignore[name-defined]
# Mail Tracking settings
tracking_settings = TrackingSettings() # type: ignore[name-defined]
click_tracking = args.get('ClickTracking')
if click_tracking:
click_tracking = click_tracking if type(click_tracking) is dict else json.loads(click_tracking)
is_enable = False if click_tracking["enable"] == 'False' else True
tracking_settings.click_tracking = ClickTracking(is_enable, # type: ignore[name-defined]
click_tracking["enable_text"])
open_tracking = args.get('OpenTracking')
if open_tracking:
open_tracking = open_tracking if type(open_tracking) is dict else json.loads(open_tracking)
is_enable = False if open_tracking["enable"] == 'False' else True
tracking_settings.open_tracking = OpenTracking( # type: ignore[name-defined]
is_enable,
OpenTrackingSubstitutionTag(open_tracking["substitution_tag"])) # type: ignore[name-defined]
sub_tracking = args.get('SubscriptionTracking')
if sub_tracking:
sub_tracking = sub_tracking if type(sub_tracking) is dict else json.loads(sub_tracking)
is_enable = False if sub_tracking["enable"] == 'False' else True
tracking_settings.subscription_tracking = SubscriptionTracking( # type: ignore[name-defined]
is_enable,
SubscriptionText(sub_tracking["text"]), # type: ignore[name-defined]
SubscriptionHtml(sub_tracking["html"]), # type: ignore[name-defined]
SubscriptionSubstitutionTag(sub_tracking["substitution_tag"])) # type: ignore[name-defined]
ganalytics = args.get('GAnalytics')
if ganalytics:
ganalytics = ganalytics if type(ganalytics) is dict else json.loads(ganalytics)
is_enable = False if ganalytics["enable"] == 'False' else True
tracking_settings.ganalytics = Ganalytics( # type: ignore[name-defined]
is_enable,
UtmSource(ganalytics["utm_source"]), # type: ignore[name-defined]
UtmMedium(ganalytics["utm_medium"]), # type: ignore[name-defined]
UtmTerm(ganalytics["utm_term"]), # type: ignore[name-defined]
UtmContent(ganalytics["utm_content"]), # type: ignore[name-defined]
UtmCampaign(ganalytics["utm_campaign"])) # type: ignore[name-defined]
message.tracking_settings = tracking_settings
# Mail Settings
mail_settings = MailSettings() # type: ignore[name-defined]
bcc_mail_set = args.get('BccSettings')
if bcc_mail_set:
bcc_mail_set = bcc_mail_set if type(bcc_mail_set) is dict else json.loads(bcc_mail_set)
is_enable = False if bcc_mail_set["enable"] == 'False' else True
mail_settings.bcc_settings = BccSettings( # type: ignore[name-defined]
is_enable,
BccSettingsEmail(bcc_mail_set["email"])) # type: ignore[name-defined]
footer = args.get('Footer')
if footer:
footer = footer if type(footer) is dict else json.loads(footer)
is_enable = False if footer["enable"] == 'False' else True
mail_settings.footer_settings = FooterSettings( # type: ignore[name-defined]
is_enable,
FooterText(footer["text"]), # type: ignore[name-defined]
FooterHtml(footer["html"])) # type: ignore[name-defined]
spam_check = args.get('SpamCheck')
if spam_check:
spam_check = spam_check if type(spam_check) is dict else json.loads(spam_check)
is_enable = False if spam_check["enable"] == 'False' else True
mail_settings.spam_check = SpamCheck( # type: ignore[name-defined]
is_enable,
SpamThreshold(spam_check["threshold"]), # type: ignore[name-defined]
SpamUrl(spam_check["post_to_url"])) # type: ignore[name-defined]
sandbox_mode = args.get('SandboxMode')
if sandbox_mode:
sandbox_mode = False if sandbox_mode == 'False' else True
mail_settings.sandbox_mode = SandBoxMode(sandbox_mode) # type: ignore[name-defined]
bypass_list_management = args.get('BypassListManagement')
if bypass_list_management:
bypass_list_management = False if bypass_list_management == 'False' else True
mail_settings.bypass_list_management = BypassListManagement(bypass_list_management) # type: ignore[name-defined]
message.mail_settings = mail_settings
headers = args.get('Headers')
if headers:
headers = headers if type(headers) is dict else json.loads(headers)
for key in headers:
message.header = Header(key, headers[key]) # type: ignore[name-defined]
template_id = args.get('TemplateID')
if template_id:
message.template_id = TemplateId(template_id) # type: ignore[name-defined]
subject = args.get('Subject')
message.subject = Subject(subject) # type: ignore[name-defined]
email_body = args.get('HtmlBody')
if email_body:
message.content = Content(MimeType.html, email_body) # type: ignore[name-defined]
raw_body = args.get('RawBody')
if raw_body:
message.content = Content(MimeType.text, raw_body) # type: ignore[name-defined]
reply_to_email = args.get('ReplyTo')
if reply_to_email:
message.reply_to = ReplyTo(reply_to_email, None) # type: ignore[name-defined]
elif reply_to_email == "":
return "Send-email failed: replyTo email is empty, please provide valid email"
message.from_email = From(sg_from_email, sg_sender_name) # type: ignore[name-defined]
to_emails = args.get('ToEmails')
to_emails = to_emails if isinstance(to_emails, list) else to_emails.split(",") # type: ignore[union-attr]
for email in to_emails:
message.to = To(email, None, p=0) # type: ignore[name-defined]
cc_emails = args.get('Cc')
if cc_emails:
cc_emails = cc_emails if isinstance(cc_emails, list) else cc_emails.split(",")
for email in cc_emails:
message.cc = Cc(email, None, p=0) # type: ignore[name-defined]
elif cc_emails == "":
return "Send-email failed: CC list is empty, please provide valid email"
bcc_emails = args.get('Bcc')
if bcc_emails:
bcc_emails = bcc_emails if isinstance(bcc_emails, list) else bcc_emails.split(",")
for email in bcc_emails:
message.bcc = Bcc(email, None, p=0) # type: ignore[name-defined]
elif bcc_emails == "":
return "Send-email failed: BCC list is empty, please provide valid email"
response = sg.send(message)
if response.status_code == 202:
return "Email Sent successfully"
else:
return "Failed to send email " + response.status_code
|
def send_mail(args: dict, sg_from_email: str, sg_sender_name: str, sg):
message = Mail() # type: ignore[name-defined]
attach_ids = args.get('AttachIDs')
attach_names = args.get('AttachNames') or ""
if attach_ids:
process_attachments(message, attach_ids, attach_names)
categories = args.get('Categories')
if categories:
categories = categories.split(",")
for category in categories:
message.category = Category(category) # type: ignore[name-defined]
batch_id = args.get('BatchID')
if batch_id:
message.batch_id = BatchId(batch_id) # type: ignore[name-defined]
send_at = args.get('SendAt')
if send_at:
t = dateutil.parser.parse(send_at)
send_time = time.mktime(t.timetuple())
message.send_at = SendAt(int(send_time)) # type: ignore[name-defined]
asm = args.get('Asm')
if asm:
asm = asm if type(asm) is dict else json.loads(asm)
message.asm = Asm(GroupId(asm["group_id"]), GroupsToDisplay(asm["groups_to_display"])) # type: ignore[name-defined]
custom_args = args.get('CustomArgs')
if custom_args:
custom_args = custom_args if type(custom_args) is dict else json.loads(custom_args)
for key in custom_args:
message.custom_arg = CustomArg(key, custom_args[key]) # type: ignore[name-defined]
ip_pool_name = args.get('IPPoolName')
if ip_pool_name:
message.ip_pool_name = IpPoolName(ip_pool_name) # type: ignore[name-defined]
# Mail Tracking settings
tracking_settings = TrackingSettings() # type: ignore[name-defined]
click_tracking = args.get('ClickTracking')
if click_tracking:
click_tracking = click_tracking if type(click_tracking) is dict else json.loads(click_tracking)
is_enable = False if click_tracking["enable"] == 'False' else True
tracking_settings.click_tracking = ClickTracking(is_enable, # type: ignore[name-defined]
click_tracking["enable_text"])
open_tracking = args.get('OpenTracking')
if open_tracking:
open_tracking = open_tracking if type(open_tracking) is dict else json.loads(open_tracking)
is_enable = False if open_tracking["enable"] == 'False' else True
tracking_settings.open_tracking = OpenTracking( # type: ignore[name-defined]
is_enable,
OpenTrackingSubstitutionTag(open_tracking["substitution_tag"])) # type: ignore[name-defined]
sub_tracking = args.get('SubscriptionTracking')
if sub_tracking:
sub_tracking = sub_tracking if type(sub_tracking) is dict else json.loads(sub_tracking)
is_enable = False if sub_tracking["enable"] == 'False' else True
tracking_settings.subscription_tracking = SubscriptionTracking( # type: ignore[name-defined]
is_enable,
SubscriptionText(sub_tracking["text"]), # type: ignore[name-defined]
SubscriptionHtml(sub_tracking["html"]), # type: ignore[name-defined]
SubscriptionSubstitutionTag(sub_tracking["substitution_tag"])) # type: ignore[name-defined]
ganalytics = args.get('GAnalytics')
if ganalytics:
ganalytics = ganalytics if type(ganalytics) is dict else json.loads(ganalytics)
is_enable = False if ganalytics["enable"] == 'False' else True
tracking_settings.ganalytics = Ganalytics( # type: ignore[name-defined]
is_enable,
UtmSource(ganalytics["utm_source"]), # type: ignore[name-defined]
UtmMedium(ganalytics["utm_medium"]), # type: ignore[name-defined]
UtmTerm(ganalytics["utm_term"]), # type: ignore[name-defined]
UtmContent(ganalytics["utm_content"]), # type: ignore[name-defined]
UtmCampaign(ganalytics["utm_campaign"])) # type: ignore[name-defined]
message.tracking_settings = tracking_settings
# Mail Settings
mail_settings = MailSettings() # type: ignore[name-defined]
bcc_mail_set = args.get('BccSettings')
if bcc_mail_set:
bcc_mail_set = bcc_mail_set if type(bcc_mail_set) is dict else json.loads(bcc_mail_set)
is_enable = False if bcc_mail_set["enable"] == 'False' else True
mail_settings.bcc_settings = BccSettings( # type: ignore[name-defined]
is_enable,
BccSettingsEmail(bcc_mail_set["email"])) # type: ignore[name-defined]
footer = args.get('Footer')
if footer:
footer = footer if type(footer) is dict else json.loads(footer)
is_enable = False if footer["enable"] == 'False' else True
mail_settings.footer_settings = FooterSettings( # type: ignore[name-defined]
is_enable,
FooterText(footer["text"]), # type: ignore[name-defined]
FooterHtml(footer["html"])) # type: ignore[name-defined]
spam_check = args.get('SpamCheck')
if spam_check:
spam_check = spam_check if type(spam_check) is dict else json.loads(spam_check)
is_enable = False if spam_check["enable"] == 'False' else True
mail_settings.spam_check = SpamCheck( # type: ignore[name-defined]
is_enable,
SpamThreshold(spam_check["threshold"]), # type: ignore[name-defined]
SpamUrl(spam_check["post_to_url"])) # type: ignore[name-defined]
sandbox_mode = args.get('SandboxMode')
if sandbox_mode:
sandbox_mode = False if sandbox_mode == 'False' else True
mail_settings.sandbox_mode = SandBoxMode(sandbox_mode) # type: ignore[name-defined]
bypass_list_management = args.get('BypassListManagement')
if bypass_list_management:
bypass_list_management = False if bypass_list_management == 'False' else True
mail_settings.bypass_list_management = BypassListManagement(bypass_list_management) # type: ignore[name-defined]
message.mail_settings = mail_settings
headers = args.get('Headers')
if headers:
headers = headers if type(headers) is dict else json.loads(headers)
for key in headers:
message.header = Header(key, headers[key]) # type: ignore[name-defined]
template_id = args.get('TemplateID')
if template_id:
message.template_id = TemplateId(template_id) # type: ignore[name-defined]
subject = args.get('Subject')
message.subject = Subject(subject) # type: ignore[name-defined]
email_body = args.get('HtmlBody')
if email_body:
message.content = Content(MimeType.html, email_body) # type: ignore[name-defined]
raw_body = args.get('RawBody')
if raw_body:
message.content = Content(MimeType.text, raw_body) # type: ignore[name-defined]
reply_to_email = args.get('ReplyTo')
if reply_to_email:
message.reply_to = ReplyTo(reply_to_email, None) # type: ignore[name-defined]
elif reply_to_email == "":
return "Send-email failed: replyTo email is empty, please provide valid email"
message.from_email = From(sg_from_email, sg_sender_name) # type: ignore[name-defined]
to_emails = args.get('ToEmails')
to_emails = to_emails if isinstance(to_emails, list) else to_emails.split(",") # type: ignore[union-attr]
for email in to_emails:
message.to = To(email, None, p=0) # type: ignore[name-defined]
cc_emails = args.get('Cc')
if cc_emails:
cc_emails = cc_emails if isinstance(cc_emails, list) else cc_emails.split(",")
for email in cc_emails:
message.cc = Cc(email, None, p=0) # type: ignore[name-defined]
elif cc_emails == "":
return "Send-email failed: CC list is empty, please provide valid email"
bcc_emails = args.get('Bcc')
if bcc_emails:
bcc_emails = bcc_emails if isinstance(bcc_emails, list) else bcc_emails.split(",")
for email in bcc_emails:
message.bcc = Bcc(email, None, p=0) # type: ignore[name-defined]
else:
raise DemistoException('BCC list is empty, please provide valid email.')
response = sg.send(message)
if response.status_code == 202:
return "Email Sent successfully"
else:
return "Failed to send email " + response.status_code
|
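For contrast with the long builder above, the minimal documented SendGrid flow looks like this; the API key and addresses are placeholders.
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail

message = Mail(
    from_email="alerts@example.com",
    to_emails="analyst@example.com",
    subject="Test",
    html_content="<p>Hello from the integration.</p>",
)
response = SendGridAPIClient("<SENDGRID_API_KEY>").send(message)
print(response.status_code)  # 202 on success, the same code send_mail() checks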
55,097 |
def dipole(hf_file, core=None, active=None, mapping="jordan_wigner", cutoff=1.0e-12, wires=None):
r"""Computes the electric dipole moment operator in the Pauli basis.
The second quantized dipole moment operator :math:`\hat{D}` of a molecule is given by
.. math::
\hat{D} = \sum_{\alpha, \beta} \langle \alpha \vert {\bf r} \vert \beta \rangle
[\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} +
\hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}] + \hat{D}_\mathrm{n}.
In the equation above, the indices :math:`\alpha, \beta` run over the basis of Hartree-Fock
molecular orbitals, the operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the
electron creation and annihilation operators, respectively, and
:math:`\langle \alpha \vert {\bf r} \vert \beta \rangle` denotes
the matrix elements of the position operator :math:`\hat{{\bf r}}`. These matrix elements
are calculated as
.. math::
\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle = \sum_{i, j} C_{\alpha i}^*C_{\beta j}
\langle i \vert {\bf r} \vert j \rangle,
where :math:`\vert i \rangle` is the wave function of the atomic orbitals and
:math:`C_{\alpha i}` and :math:`\langle i \vert \hat{{\bf r}} \vert j \rangle`
are the representations of the molecular orbitals and the operator
:math:`\hat{{\bf r}}` in the atomic basis.
The contribution of the nuclei to the dipole operator is given by
.. math::
\hat{D}_\mathrm{n} = -\sum_{i=1}^{N_\mathrm{atoms}} Z_i {\bf R}_i \hat{I},
where :math:`Z_i` and :math:`{\bf R}_i` are, respectively, the atomic number and the
position vector of the :math:`i`-th atom of the molecule.
Args:
hf_file (str): Absolute path to the hdf5-formatted file with the Hartree-Fock
electronic structure. This file can be generated using the
:func:`~.meanfield` function.
core (list): indices of core orbitals, i.e., the orbitals that are
not correlated in the many-body wave function
active (list): indices of active orbitals, i.e., the orbitals used to
build the correlated many-body wave function
mapping (str): Specifies the transformation to map the fermionic operator to the
Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
cutoff (float): Cutoff value for including the matrix elements
:math:`\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle`. The matrix elements
with absolute value less than ``cutoff`` are neglected.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
list[pennylane.Hamiltonian]: the qubit observables corresponding to the components
:math:`\hat{D}_x`, :math:`\hat{D}_y` and :math:`\hat{D}_z` of the dipole operator in
atomic units (Bohr radii).
**Example**
>>> dipole_obs = dipole("./h3p.hdf5")
>>> print(dipole_obs)
[<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=1, wires=[0]>]
>>> print(dipole_obs[0])
(-1.4861475511479285) [Z0]
+ (-1.4861475511479285) [Z1]
+ (-1.0207535180657459) [Z2]
+ (-1.0207535180657459) [Z3]
+ (-0.38409271341166346) [Z4]
+ (-0.38409271341166346) [Z5]
+ (2.9129875652506754) [I0]
+ (-1.0463884953059674) [Y0 Z1 Y2]
+ (-1.0463884953059674) [X0 Z1 X2]
+ (-1.0463884953059674) [Y1 Z2 Y3]
+ (-1.0463884953059674) [X1 Z2 X3]
+ (-0.2949628258407493) [Y2 Z3 Y4]
+ (-0.2949628258407493) [X2 Z3 X4]
+ (-0.2949628258407493) [Y3 Z4 Y5]
+ (-0.2949628258407493) [X3 Z4 X5]
+ (-0.10008920247855208) [Y0 Z1 Z2 Z3 Y4]
+ (-0.10008920247855208) [X0 Z1 Z2 Z3 X4]
+ (-0.10008920247855208) [Y1 Z2 Z3 Z4 Y5]
+ (-0.10008920247855208) [X1 Z2 Z3 Z4 X5]
"""
bohr_angs = 0.529177210903
atomic_numbers = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
}
hf = openfermion.MolecularData(filename=hf_file.strip())
if hf.multiplicity != 1:
raise ValueError(
"Currently, this functionality is constrained to closed-shell Hartree-Fock states;"
" got spin multiplicity 2S+1 = {}".format(hf.multiplicity)
)
for i in hf.geometry:
print(i[0])
if i[0] not in atomic_numbers:
raise ValueError(
"Currently, only first- or second-row elements of the periodic table are supported;"
" got element {}".format(i[0])
)
# Load dipole matrix elements in the atomic basis
# pylint: disable=import-outside-toplevel
from pyscf import gto
mol = gto.M(
atom=hf.geometry, basis=hf.basis, charge=hf.charge, spin=0.5 * (hf.multiplicity - 1)
)
dip_ao = mol.intor_symmetric("int1e_r", comp=3).real
# Transform dipole matrix elements to the MO basis
n_orbs = hf.n_orbitals
c_hf = hf.canonical_orbitals
dip_mo = np.zeros((3, n_orbs, n_orbs))
for comp in range(3):
for alpha in range(n_orbs):
for beta in range(alpha + 1):
dip_mo[comp, alpha, beta] = c_hf[alpha] @ dip_ao[comp] @ c_hf[beta]
dip_mo[comp] += dip_mo[comp].T - np.diag(np.diag(dip_mo[comp]))
# Compute the nuclear contribution
dip_n = np.zeros(3)
for comp in range(3):
for i in hf.geometry:
dip_n[comp] -= atomic_numbers[i[0]] * i[1][comp] / bohr_angs
# Build the observable
dip_obs = []
for i in range(3):
fermion_obs = one_particle(dip_mo[i], core=core, active=active, cutoff=cutoff)
dip_obs.append(observable([fermion_obs], init_term=dip_n[i], mapping=mapping, wires=wires))
return dip_obs
|
def dipole(hf_file, core=None, active=None, mapping="jordan_wigner", cutoff=1.0e-12, wires=None):
r"""Computes the electric dipole moment operator in the Pauli basis.
The second quantized dipole moment operator :math:`\hat{D}` of a molecule is given by
.. math::
\hat{D} = \sum_{\alpha, \beta} \langle \alpha \vert {\bf r} \vert \beta \rangle
[\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} +
\hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}] + \hat{D}_\mathrm{n}.
In the equation above, the indices :math:`\alpha, \beta` run over the basis of Hartree-Fock
molecular orbitals, the operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the
electron creation and annihilation operators, respectively, and
:math:`\langle \alpha \vert {\bf r} \vert \beta \rangle` denotes
the matrix elements of the position operator :math:`\hat{{\bf r}}`. These matrix elements
are calculated as
.. math::
\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle = \sum_{i, j} C_{\alpha i}^*C_{\beta j}
\langle i \vert {\bf r} \vert j \rangle,
where :math:`\vert i \rangle` is the wave function of the atomic orbitals,
:math:`C_{\alpha i}` are the coefficients defining the molecular orbitals,
and :math:`\langle i \vert \hat{{\bf r}} \vert j \rangle` is the representation
of the operator :math:`\hat{{\bf r}}` in the atomic basis.
The contribution of the nuclei to the dipole operator is given by
.. math::
\hat{D}_\mathrm{n} = -\sum_{i=1}^{N_\mathrm{atoms}} Z_i {\bf R}_i \hat{I},
where :math:`Z_i` and :math:`{\bf R}_i` are, respectively, the atomic number and the
position vector of the :math:`i`-th atom of the molecule.
Args:
hf_file (str): Absolute path to the hdf5-formatted file with the Hartree-Fock
electronic structure. This file can be generated using the
:func:`~.meanfield` function.
core (list): indices of core orbitals, i.e., the orbitals that are
not correlated in the many-body wave function
active (list): indices of active orbitals, i.e., the orbitals used to
build the correlated many-body wave function
mapping (str): Specifies the transformation to map the fermionic operator to the
Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
cutoff (float): Cutoff value for including the matrix elements
:math:`\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle`. The matrix elements
with absolute value less than ``cutoff`` are neglected.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
list[pennylane.Hamiltonian]: the qubit observables corresponding to the components
:math:`\hat{D}_x`, :math:`\hat{D}_y` and :math:`\hat{D}_z` of the dipole operator in
atomic units (Bohr radii).
**Example**
>>> dipole_obs = dipole("./h3p.hdf5")
>>> print(dipole_obs)
[<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=1, wires=[0]>]
>>> print(dipole_obs[0])
(-1.4861475511479285) [Z0]
+ (-1.4861475511479285) [Z1]
+ (-1.0207535180657459) [Z2]
+ (-1.0207535180657459) [Z3]
+ (-0.38409271341166346) [Z4]
+ (-0.38409271341166346) [Z5]
+ (2.9129875652506754) [I0]
+ (-1.0463884953059674) [Y0 Z1 Y2]
+ (-1.0463884953059674) [X0 Z1 X2]
+ (-1.0463884953059674) [Y1 Z2 Y3]
+ (-1.0463884953059674) [X1 Z2 X3]
+ (-0.2949628258407493) [Y2 Z3 Y4]
+ (-0.2949628258407493) [X2 Z3 X4]
+ (-0.2949628258407493) [Y3 Z4 Y5]
+ (-0.2949628258407493) [X3 Z4 X5]
+ (-0.10008920247855208) [Y0 Z1 Z2 Z3 Y4]
+ (-0.10008920247855208) [X0 Z1 Z2 Z3 X4]
+ (-0.10008920247855208) [Y1 Z2 Z3 Z4 Y5]
+ (-0.10008920247855208) [X1 Z2 Z3 Z4 X5]
"""
bohr_angs = 0.529177210903
atomic_numbers = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
}
hf = openfermion.MolecularData(filename=hf_file.strip())
if hf.multiplicity != 1:
raise ValueError(
"Currently, this functionality is constrained to closed-shell Hartree-Fock states;"
" got spin multiplicity 2S+1 = {}".format(hf.multiplicity)
)
for i in hf.geometry:
print(i[0])
if i[0] not in atomic_numbers:
raise ValueError(
"Currently, only first- or second-row elements of the periodic table are supported;"
" got element {}".format(i[0])
)
# Load dipole matrix elements in the atomic basis
# pylint: disable=import-outside-toplevel
from pyscf import gto
mol = gto.M(
atom=hf.geometry, basis=hf.basis, charge=hf.charge, spin=0.5 * (hf.multiplicity - 1)
)
dip_ao = mol.intor_symmetric("int1e_r", comp=3).real
# Transform dipole matrix elements to the MO basis
n_orbs = hf.n_orbitals
c_hf = hf.canonical_orbitals
dip_mo = np.zeros((3, n_orbs, n_orbs))
for comp in range(3):
for alpha in range(n_orbs):
for beta in range(alpha + 1):
dip_mo[comp, alpha, beta] = c_hf[alpha] @ dip_ao[comp] @ c_hf[beta]
dip_mo[comp] += dip_mo[comp].T - np.diag(np.diag(dip_mo[comp]))
# Compute the nuclear contribution
dip_n = np.zeros(3)
for comp in range(3):
for i in hf.geometry:
dip_n[comp] -= atomic_numbers[i[0]] * i[1][comp] / bohr_angs
# Build the observable
dip_obs = []
for i in range(3):
fermion_obs = one_particle(dip_mo[i], core=core, active=active, cutoff=cutoff)
dip_obs.append(observable([fermion_obs], init_term=dip_n[i], mapping=mapping, wires=wires))
return dip_obs
|
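The triple loop above builds the molecular-orbital dipole matrix element by element and then mirrors the lower triangle. A minimal vectorized sketch of the same arithmetic (not a drop-in replacement, and assuming ``c_hf`` has shape ``(n_orbs, n_ao)`` and ``dip_ao`` shape ``(3, n_ao, n_ao)``, as indexed in the loop):
import numpy as np
# dip_mo[x] = C @ dip_ao[x] @ C.T for each Cartesian component x; because
# int1e_r is symmetric, the full matrix is symmetric and no triangular
# copy step is needed.
dip_mo = np.einsum("ap,xpq,bq->xab", c_hf, dip_ao, c_hf)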
7,855 |
def test_atoms_distribmat_cell(uo2, water):
""" Test if correct number of atoms is returned for a cell with
'distribmat' fill
"""
c = openmc.Cell(fill=[uo2, water])
c.volume = 6.0
# Calculate the expected number of atoms
expected_nucs = ['U235', 'O16', 'H1']
M_uo2 = ((atomic_mass('U235') + 2 * atomic_mass('O16'))/3)
M_water = ((2 * atomic_mass('H1') + atomic_mass('O16'))/3)
expected_atoms = list()
expected_atoms.append(1/3 * uo2.density/M_uo2 * AVOGADRO * 3.0) # U235
expected_atoms.append(2/3 * uo2.density/M_uo2 * AVOGADRO * 3.0 +
1/3 * water.density/M_water * AVOGADRO * 3.0) # O16
expected_atoms.append(2/3 * water.density/M_water * AVOGADRO * 3.0) # H1
tuples = list(c.atoms.items())
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
|
def test_atoms_distribmat_cell(uo2, water):
""" Test if correct number of atoms is returned for a cell with
'distribmat' fill
"""
c = openmc.Cell(fill=[uo2, water])
c.volume = 6.0
# Calculate the expected number of atoms
expected_nucs = ['U235', 'O16', 'H1']
M_uo2 = ((atomic_mass('U235') + 2 * atomic_mass('O16'))/3)
M_water = (2 * atomic_mass('H1') + atomic_mass('O16')) / 3
expected_atoms = list()
expected_atoms.append(1/3 * uo2.density/M_uo2 * AVOGADRO * 3.0) # U235
expected_atoms.append(2/3 * uo2.density/M_uo2 * AVOGADRO * 3.0 +
1/3 * water.density/M_water * AVOGADRO * 3.0) # O16
expected_atoms.append(2/3 * water.density/M_water * AVOGADRO * 3.0) # H1
tuples = list(c.atoms.items())
for nuc, atom_num, t in zip(expected_nucs, expected_atoms, tuples):
assert nuc == t[0]
assert atom_num == pytest.approx(t[1])
|
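The expected counts in this test follow the usual number-density bookkeeping: a nuclide with atom fraction x, in a material of mass density rho and average molar mass per atom M filling a volume V, contributes N = x * rho / M * N_A * V atoms (here V = 3.0, half of the 6.0 cell shared by the two distribmat materials). A small worked sketch with purely illustrative numbers:
AVOGADRO = 6.02214076e23
density, volume = 10.0, 3.0          # illustrative values only
atom_fraction, M_avg = 1/3, 89.0     # e.g. U235 in UO2, (235 + 2*16)/3
atoms = atom_fraction * density / M_avg * AVOGADRO * volume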
41,345 |
def rename_doubles(batch_metrics_names, epoch_metrics_names, torch_metrics_names):
metrics_names = rename_doubles_from_list(batch_metrics_names + epoch_metrics_names + torch_metrics_names)
batch_metrics_names = metrics_names[: len(batch_metrics_names)]
epoch_metrics_names = metrics_names[len(batch_metrics_names) : len(batch_metrics_names) + len(epoch_metrics_names)]
torch_metrics_names = metrics_names[len(batch_metrics_names) + len(epoch_metrics_names) :]
return batch_metrics_names, epoch_metrics_names, torch_metrics_names
|
def rename_doubles(batch_metrics_names: List, epoch_metrics_names: List, torch_metrics_names: List) -> Tuple:
metrics_names = rename_doubles_from_list(batch_metrics_names + epoch_metrics_names + torch_metrics_names)
batch_metrics_names = metrics_names[: len(batch_metrics_names)]
epoch_metrics_names = metrics_names[len(batch_metrics_names) : len(batch_metrics_names) + len(epoch_metrics_names)]
torch_metrics_names = metrics_names[len(batch_metrics_names) + len(epoch_metrics_names) :]
return batch_metrics_names, epoch_metrics_names, torch_metrics_names
|
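The helper splits the de-duplicated combined list back into its three segments purely by length, so the slicing logic can be shown on its own with a hypothetical name list:
combined = ["loss", "acc", "acc_1", "f1"]          # hypothetical de-duplicated names
n_batch, n_epoch = 2, 1
batch_part = combined[:n_batch]                    # ["loss", "acc"]
epoch_part = combined[n_batch:n_batch + n_epoch]   # ["acc_1"]
torch_part = combined[n_batch + n_epoch:]          # ["f1"]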
30,107 |
def test_prefetch_subject_scaled_is_larger(runtmp, linear_gather):
# test prefetch where query and subject db both have multiple ksizes
c = runtmp
# make a query sketch with scaled=1000
fa = utils.get_test_data('genome-s10.fa.gz')
c.run_sourmash('sketch', 'dna', fa, '-o', 'query.sig')
assert os.path.exists(runtmp.output('query.sig'))
# this has a scaled of 10000, from same genome:
against1 = utils.get_test_data('scaled/genome-s10.fa.gz.sig')
against2 = utils.get_test_data('scaled/all.sbt.zip')
against3 = utils.get_test_data('scaled/all.lca.json')
# run against large scaled, then small (self)
c.run_sourmash('prefetch', 'query.sig', against1, against2, against3,
'query.sig', linear_gather)
print(c.last_result.status)
print(c.last_result.out)
print(c.last_result.err)
assert c.last_result.status == 0
assert 'total of 8 matching signatures.' in c.last_result.err
assert 'of 48 distinct query hashes, 48 were found in matches above threshold.' in c.last_result.err
assert 'final scaled value (max across query and all matches) is 10000' in c.last_result.err
|
def test_prefetch_subject_scaled_is_larger(runtmp, linear_gather):
# test prefetch where subject scaled is larger
c = runtmp
# make a query sketch with scaled=1000
fa = utils.get_test_data('genome-s10.fa.gz')
c.run_sourmash('sketch', 'dna', fa, '-o', 'query.sig')
assert os.path.exists(runtmp.output('query.sig'))
# this has a scaled of 10000, from same genome:
against1 = utils.get_test_data('scaled/genome-s10.fa.gz.sig')
against2 = utils.get_test_data('scaled/all.sbt.zip')
against3 = utils.get_test_data('scaled/all.lca.json')
# run against large scaled, then small (self)
c.run_sourmash('prefetch', 'query.sig', against1, against2, against3,
'query.sig', linear_gather)
print(c.last_result.status)
print(c.last_result.out)
print(c.last_result.err)
assert c.last_result.status == 0
assert 'total of 8 matching signatures.' in c.last_result.err
assert 'of 48 distinct query hashes, 48 were found in matches above threshold.' in c.last_result.err
assert 'final scaled value (max across query and all matches) is 10000' in c.last_result.err
|
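The "final scaled value (max across query and all matches)" assertion reflects how scaled (FracMinHash-style) sketches are compared: the finer sketch is downsampled to the coarser scaled value before hashes are intersected. A rough conceptual sketch of that downsampling rule, not sourmash's actual API:
H = 2**64
def downsample(hashes, new_scaled):
    # keep only hashes below the smaller max-hash threshold implied by the
    # larger scaled value
    return {h for h in hashes if h < H / new_scaled}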
36,282 |
def calibrate_observable_estimates(qc: QuantumComputer, expt_results: List[ExperimentResult],
n_shots: int = 500, symm_type: int = -1,
noisy_program: Optional[Program] = None,
active_reset: bool = False,
show_progress_bar: bool = False) \
-> Iterable[ExperimentResult]:
"""
Calibrates the expectation and std_err of the input expt_results and updates those estimates.
The input expt_results should be estimated with symmetrized readout error for this to work
properly. Calibration is done by measuring expectation values of eigenstates of the
observable, which ideally should yield either +/- 1 but in practice will have magnitude less
than 1. For default exhaustive_symmetrization the calibration expectation magnitude
averaged over all eigenvectors is recorded as calibration_expectation. The original
expectation is moved to raw_expectation and replaced with the old value scaled by the inverse
calibration expectation.
:param qc: a quantum computer object on which to run the programs necessary to calibrate each
result.
:param expt_results: a list of results, each of which will be separately calibrated.
:param n_shots: the number of shots to run for each eigenvector
:param symm_type: the type of symmetrization
* -1 -- exhaustive symmetrization uses every possible combination of flips; this option
is the default since it ensures proper calibration, but is exponential in the
weight of each observable.
* 0 -- no symmetrization
* 1 -- symmetrization using an OA with strength 1
* 2 -- symmetrization using an OA with strength 2
* 3 -- symmetrization using an OA with strength 3
TODO: accommodate calibration for weight > symmetrization strength (symm_type)
Currently, the symmetrization type must be at least the maximum weight of any observable
estimated and also match the symmetrization type used to estimate the observables of the
input ExperimentResults.
:param noisy_program: an optional program from which to inherit a noise model; only relevant
for running on a QVM
:param active_reset: whether or not to begin the program by actively resetting. If true,
execution of each of the returned programs in a loop on the QPU will generally be faster.
:param show_progress_bar: displays a progress bar via tqdm if true.
:return: a copy of the input results with updated estimates and calibration results.
"""
# get unique observables that will need to be calibrated
observables = {copy(res.setting.out_operator) for res in expt_results}
calibrations = {}
for obs in tqdm(observables, disable=not show_progress_bar):
prog = get_calibration_program(obs, noisy_program, active_reset)
meas_qs = obs.get_qubits()
results = qc.run_symmetrized_readout(prog, n_shots, symm_type, meas_qs)
# Obtain statistics from result of experiment
# TODO: we have to fabricate an ExperimentSetting to pass to _stats_from_measurements
# even though it only needs the observable.
setting = ExperimentSetting(zeros_state(meas_qs), obs)
obs_mean, obs_var = _stats_from_measurements(results,
{q: idx for idx, q in enumerate(meas_qs)},
setting, len(results))
calibrations[obs.operations_as_set()] = (obs_mean, obs_var, len(results))
for expt_result in expt_results:
# TODO: allow weight > symm_type
if -1 < symm_type < len(expt_result.setting.out_operator.get_qubits()):
warnings.warn(f'Calibration of observable {expt_result.setting.out_operator} '
f'currently not supported since it acts on more qubits than the '
f'symm_type {symm_type}.')
# get the calibration data for this observable
cal_data = calibrations[expt_result.setting.out_operator.operations_as_set()]
obs_mean, obs_var, counts = cal_data
# Use the calibration to correct the mean and var
result_mean = expt_result.expectation
result_var = expt_result.std_err ** 2
corrected_mean = result_mean / obs_mean
corrected_var = ratio_variance(result_mean, result_var, obs_mean, obs_var)
yield ExperimentResult(
setting=expt_result.setting,
expectation=corrected_mean,
std_err=np.sqrt(corrected_var),
total_counts=expt_result.total_counts,
raw_expectation=result_mean,
raw_std_err=expt_result.std_err,
calibration_expectation=obs_mean,
calibration_std_err=np.sqrt(obs_var),
calibration_counts=counts
)
|
def calibrate_observable_estimates(qc: QuantumComputer, expt_results: List[ExperimentResult],
n_shots: int = 500, symm_type: int = -1,
noisy_program: Optional[Program] = None,
active_reset: bool = False,
show_progress_bar: bool = False) \
-> Iterable[ExperimentResult]:
"""
Calibrates the expectation and std_err of the input expt_results and updates those estimates.
The input expt_results should be estimated with symmetrized readout error for this to work
properly. Calibration is done by measuring expectation values of eigenstates of the
observable, which ideally should yield either +/- 1 but in practice will have magnitude less
than 1. For default exhaustive_symmetrization the calibration expectation magnitude
averaged over all eigenvectors is recorded as calibration_expectation. The original
expectation is moved to raw_expectation and replaced with the old value scaled by the inverse
calibration expectation.
:param qc: a quantum computer object on which to run the programs necessary to calibrate each
result.
:param expt_results: a list of results, each of which will be separately calibrated.
:param n_shots: the number of shots to run for each eigenvector
:param symm_type: the type of symmetrization
* -1 -- exhaustive symmetrization uses every possible combination of flips; this option
is the default since it ensures proper calibration, but is exponential in the
weight of each observable.
* 0 -- no symmetrization
* 1 -- symmetrization using an OA with strength 1
* 2 -- symmetrization using an OA with strength 2
* 3 -- symmetrization using an OA with strength 3
TODO: accommodate calibration for weight > symmetrization strength (symm_type)
Currently, the symmetrization type must be at least the maximum weight of any observable
estimated and also match the symmetrization type used to estimate the observables of the
input ExperimentResults.
:param noisy_program: an optional program from which to inherit a noise model; only relevant
for running on a QVM
:param active_reset: whether or not to begin the program by actively resetting. If true,
execution of each of the returned programs in a loop on the QPU will generally be faster.
:param show_progress_bar: displays a progress bar if true.
:return: a copy of the input results with updated estimates and calibration results.
"""
# get unique observables that will need to be calibrated
observables = {copy(res.setting.out_operator) for res in expt_results}
calibrations = {}
for obs in tqdm(observables, disable=not show_progress_bar):
prog = get_calibration_program(obs, noisy_program, active_reset)
meas_qs = obs.get_qubits()
results = qc.run_symmetrized_readout(prog, n_shots, symm_type, meas_qs)
# Obtain statistics from result of experiment
# TODO: we have to fabricate an ExperimentSetting to pass to _stats_from_measurements
# even though it only needs the observable.
setting = ExperimentSetting(zeros_state(meas_qs), obs)
obs_mean, obs_var = _stats_from_measurements(results,
{q: idx for idx, q in enumerate(meas_qs)},
setting, len(results))
calibrations[obs.operations_as_set()] = (obs_mean, obs_var, len(results))
for expt_result in expt_results:
# TODO: allow weight > symm_type
if -1 < symm_type < len(expt_result.setting.out_operator.get_qubits()):
warnings.warn(f'Calibration of observable {expt_result.setting.out_operator} '
f'currently not supported since it acts on more qubits than the '
f'symm_type {symm_type}.')
# get the calibration data for this observable
cal_data = calibrations[expt_result.setting.out_operator.operations_as_set()]
obs_mean, obs_var, counts = cal_data
# Use the calibration to correct the mean and var
result_mean = expt_result.expectation
result_var = expt_result.std_err ** 2
corrected_mean = result_mean / obs_mean
corrected_var = ratio_variance(result_mean, result_var, obs_mean, obs_var)
yield ExperimentResult(
setting=expt_result.setting,
expectation=corrected_mean,
std_err=np.sqrt(corrected_var),
total_counts=expt_result.total_counts,
raw_expectation=result_mean,
raw_std_err=expt_result.std_err,
calibration_expectation=obs_mean,
calibration_std_err=np.sqrt(obs_var),
calibration_counts=counts
)
|
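Because the corrected estimate divides the raw expectation by the calibration expectation, its error bar must be propagated as the variance of a ratio. A first-order propagation sketch of what a ``ratio_variance``-style helper computes, assuming the two estimates are independent (the library's own implementation may differ in form):
def ratio_variance_sketch(a, var_a, b, var_b):
    # Var(a/b) ~= var_a/b**2 + a**2*var_b/b**4, i.e.
    # (a/b)**2 * (var_a/a**2 + var_b/b**2) to first order
    return var_a / b ** 2 + a ** 2 * var_b / b ** 4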
43,821 |
def apply_controlled_Q(fn, wires, target_wire, control_wire, work_wires):
r"""Provides the circuit to apply a controlled version of the :math:`\mathcal{Q}` unitary
defined in `this <https://arxiv.org/abs/1805.00109>`__ paper.
Given a callable ``fn`` input corresponding to the :math:`\mathcal{F}` unitary in the above
paper, this function transforms the circuit into a controlled-version of the :math:`\mathcal{Q}`
unitary which forms part of the quantum Monte Carlo algorithm. In this algorithm, one of the
wires acted upon by :math:`\mathcal{F}`, specified by ``target_wire``, is used to embed a
Monte Carlo estimation problem. The :math:`\mathcal{Q}` unitary is then designed to encode the
target expectation value as a phase in one of its eigenvalues.
This function transforms to a controlled version of :math:`\mathcal{Q}` that is compatible with
quantum phase estimation (see :class:`~.QuantumPhaseEstimation` for more details).
Args:
fn (Callable): a quantum function that applies quantum operations according to the
:math:`\mathcal{F}` unitary used as part of quantum Monte Carlo estimation
wires (Union[Wires or Sequence[int]]): the wires acted upon by the ``fn`` circuit
target_wire (Union[Wires, int]): The wire in which the expectation value is encoded. Must be
contained within ``wires``.
control_wire (Union[Wires, int]): the control wire from the register of phase estimation
qubits
work_wires (Union[Wires, Sequence[int], or int]): additional work wires used when
decomposing :math:`\mathcal{Q}`
Returns:
function: The input function transformed to the :math:`\mathcal{Q}` unitary
Raises:
ValueError: if ``target_wire`` is not in ``wires``
"""
fn_inv = adjoint(fn)
wires = Wires(wires)
target_wire = Wires(target_wire)
control_wire = Wires(control_wire)
work_wires = Wires(work_wires)
if not wires.contains_wires(target_wire):
raise ValueError("The target wire must be contained within wires")
@wraps(fn)
def wrapper(*args, **kwargs):
_apply_controlled_v(target_wire=target_wire, control_wire=control_wire)
fn_inv(*args, **kwargs)
_apply_controlled_z(wires=wires, control_wire=control_wire, work_wires=work_wires)
fn(*args, **kwargs)
_apply_controlled_v(target_wire=target_wire, control_wire=control_wire)
fn_inv(*args, **kwargs)
_apply_controlled_z(wires=wires, control_wire=control_wire, work_wires=work_wires)
fn(*args, **kwargs)
return wrapper
|
def apply_controlled_Q(fn, wires, target_wire, control_wire, work_wires):
r"""Provides the circuit to apply a controlled version of the :math:`\mathcal{Q}` unitary
defined in `this <https://arxiv.org/abs/1805.00109>`__ paper.
Given a callable ``fn`` input corresponding to the :math:`\mathcal{F}` unitary in the above
paper, this function transforms the circuit into a controlled-version of the :math:`\mathcal{Q}`
unitary which forms part of the quantum Monte Carlo algorithm. In this algorithm, one of the
wires acted upon by :math:`\mathcal{F}` and specified by ``target_wire``, is used to embed a
Monte Carlo estimation problem. The :math:`\mathcal{Q}` unitary is then designed to encode the
target expectation value as a phase in one of its eigenvalues.
This function transforms to a controlled version of :math:`\mathcal{Q}` that is compatible with
quantum phase estimation (see :class:`~.QuantumPhaseEstimation` for more details).
Args:
fn (Callable): a quantum function that applies quantum operations according to the
:math:`\mathcal{F}` unitary used as part of quantum Monte Carlo estimation
wires (Union[Wires or Sequence[int]]): the wires acted upon by the ``fn`` circuit
target_wire (Union[Wires, int]): The wire in which the expectation value is encoded. Must be
contained within ``wires``.
control_wire (Union[Wires, int]): the control wire from the register of phase estimation
qubits
work_wires (Union[Wires, Sequence[int], or int]): additional work wires used when
decomposing :math:`\mathcal{Q}`
Returns:
function: The input function transformed to the :math:`\mathcal{Q}` unitary
Raises:
ValueError: if ``target_wire`` is not in ``wires``
"""
fn_inv = adjoint(fn)
wires = Wires(wires)
target_wire = Wires(target_wire)
control_wire = Wires(control_wire)
work_wires = Wires(work_wires)
if not wires.contains_wires(target_wire):
raise ValueError("The target wire must be contained within wires")
@wraps(fn)
def wrapper(*args, **kwargs):
_apply_controlled_v(target_wire=target_wire, control_wire=control_wire)
fn_inv(*args, **kwargs)
_apply_controlled_z(wires=wires, control_wire=control_wire, work_wires=work_wires)
fn(*args, **kwargs)
_apply_controlled_v(target_wire=target_wire, control_wire=control_wire)
fn_inv(*args, **kwargs)
_apply_controlled_z(wires=wires, control_wire=control_wire, work_wires=work_wires)
fn(*args, **kwargs)
return wrapper
|
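Reading the calls in ``wrapper`` in execution order (the first ``_apply_controlled_v`` acts first), the controlled unitary being assembled is, with the control left implicit and the matrix product read right to left, :math:`\mathcal{Q}_c = \left(\mathcal{F}\, Z\, \mathcal{F}^{-1}\, V\right)^2`. This is only a compact restatement of the wrapper body, not a claim about the paper's own conventions.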
28,000 |
def copy_entry_points(input_data, build_dir):
"""
Copy CodeChecker entry point sub-commands.
"""
package_root = os.path.join(build_dir, 'CodeChecker')
package_bin = os.path.join(package_root, 'bin')
package_bin_cmd = os.path.join(package_root, 'bin', 'cmd')
os.makedirs(package_bin_cmd)
target_cc = os.path.join(package_root, 'cc_bin')
available_commands = {}
for i in input_data:
module_name = i.split(':')[1]
input_dir = i.split(':')[0]
for input_file in glob.glob(os.path.join(input_dir, '*')):
file_name = os.path.basename(input_file)
if not file_name.endswith(".py"):
# Non-py files use the environment to appear as Python
# files; they go into the folder in PATH, as they are
# entry points.
if file_name.startswith("codechecker-"):
command_name = file_name.replace("codechecker-", "")
file_name = command_name.replace('-', '_')
module_path = module_name + '/' + file_name + '.py'
available_commands[command_name] = module_path
skip_content = "# DO_NOT_INSTALL_TO_PATH"
with open(input_file, 'r',
encoding="utf-8", errors="ignore") as file:
if file.readline().strip() == skip_content:
LOG.info("Registering sub-command '%s'",
command_name)
# If the file is marked not to install, do not
# install it. This happens with entry points
# that should not act as "lowercase" entries,
# but the sub-command exists as an available
# command.
continue
LOG.info("Registering sub-command '%s' installed to "
"PATH", command_name)
# Only the CodeChecker top level script should be copied
# into the bin directory and every subcommand should be
# called through that.
if not input_file.endswith("CodeChecker"):
shutil.copy2(input_file, package_bin_cmd)
else:
shutil.copy2(input_file, package_bin)
else:
# .py files are Python code that must run in a valid env.
shutil.copy2(input_file, target_cc)
commands_json = os.path.join(target_cc, 'commands.json')
with open(commands_json, 'w',
encoding="utf-8", errors="ignore") as commands:
json.dump(available_commands, commands, sort_keys=True, indent=2)
|
def copy_entry_points(input_data, build_dir):
"""
Copy CodeChecker entry point sub-commands.
"""
package_root = os.path.join(build_dir, 'CodeChecker')
package_bin = os.path.join(package_root, 'bin')
package_bin_cmd = os.path.join(package_bin, 'cmd')
os.makedirs(package_bin_cmd)
target_cc = os.path.join(package_root, 'cc_bin')
available_commands = {}
for i in input_data:
module_name = i.split(':')[1]
input_dir = i.split(':')[0]
for input_file in glob.glob(os.path.join(input_dir, '*')):
file_name = os.path.basename(input_file)
if not file_name.endswith(".py"):
# Non-py files use the environment to appear as Python
# files; they go into the folder in PATH, as they are
# entry points.
if file_name.startswith("codechecker-"):
command_name = file_name.replace("codechecker-", "")
file_name = command_name.replace('-', '_')
module_path = module_name + '/' + file_name + '.py'
available_commands[command_name] = module_path
skip_content = "# DO_NOT_INSTALL_TO_PATH"
with open(input_file, 'r',
encoding="utf-8", errors="ignore") as file:
if file.readline().strip() == skip_content:
LOG.info("Registering sub-command '%s'",
command_name)
# If the file is marked not to install, do not
# install it. This happens with entry points
# that should not act as "lowercase" entries,
# but the sub-command exists as an available
# command.
continue
LOG.info("Registering sub-command '%s' installed to "
"PATH", command_name)
# Only the CodeChecker top level script should be copied
# into the bin directory and every subcommand should be
# called through that.
if not input_file.endswith("CodeChecker"):
shutil.copy2(input_file, package_bin_cmd)
else:
shutil.copy2(input_file, package_bin)
else:
# .py files are Python code that must run in a valid env.
shutil.copy2(input_file, target_cc)
commands_json = os.path.join(target_cc, 'commands.json')
with open(commands_json, 'w',
encoding="utf-8", errors="ignore") as commands:
json.dump(available_commands, commands, sort_keys=True, indent=2)
|
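The net effect of the loop is a ``commands.json`` that maps each registered sub-command name to the module path implementing it, which the top-level CodeChecker script then uses to call sub-commands. A purely illustrative sketch of the mapping being dumped (the entries here are hypothetical):
available_commands = {
    "analyze": "codechecker_analyzer/analyze.py",   # hypothetical entries
    "store": "codechecker_server/store.py",
}
# json.dump(available_commands, commands, sort_keys=True, indent=2) then
# writes the commands.json consumed at runtime.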
17,954 |
def average_rate(target = None, varying = None, trim = None):
"""Computes the average rate of a target net income.
Given a ``target`` net income, and according to the ``varying`` gross
income. Optionally, a ``trim`` can be applied consisting on the lower and
upper bounds of the average rate to be computed.
Note:
Usually, ``target`` and ``varying`` are the same size.
Args:
target: The targeted net income.
varying: The varying gross income.
trim: The lower and upper bounds of the average rate.
Returns:
:obj:`numpy.ndarray` of :obj:`float`:
The average rate for each target.
When ``trim`` is provided, values that are out of the provided bounds
are replaced by :obj:`numpy.nan`.
"""
average_rate = 1 - target / varying
if trim is not None:
average_rate = numpy.where(average_rate <= max(trim), average_rate, numpy.nan)
average_rate = numpy.where(average_rate >= min(trim), average_rate, numpy.nan)
return average_rate
|
def average_rate(target = None, varying = None, trim = None):
"""Computes the average rate of a target net income.
Given a ``target`` net income, and according to the ``varying`` gross
income. Optionally, a ``trim`` can be applied consisting of the lower and
upper bounds of the average rate to be computed.
Note:
Usually, ``target`` and ``varying`` are the same size.
Args:
target: The targeted net income.
varying: The varying gross income.
trim: The lower and upper bounds of the average rate.
Returns:
:obj:`numpy.ndarray` of :obj:`float`:
The average rate for each target.
When ``trim`` is provided, values that are out of the provided bounds
are replaced by :obj:`numpy.nan`.
"""
average_rate = 1 - target / varying
if trim is not None:
average_rate = numpy.where(average_rate <= max(trim), average_rate, numpy.nan)
average_rate = numpy.where(average_rate >= min(trim), average_rate, numpy.nan)
return average_rate
|
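A small usage sketch of the trimming behaviour, with made-up incomes:
import numpy
target = numpy.array([80.0, 50.0, 10.0])
varying = numpy.array([100.0, 100.0, 100.0])
rates = average_rate(target, varying, trim = [0.0, 0.5])
# 1 - target/varying gives [0.2, 0.5, 0.9]; 0.9 lies above max(trim), so it
# is replaced by numpy.nan -> [0.2, 0.5, nan]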
30,412 |
def get_base_branch(pr_num):
"""
Fetches the base branch name of PR num {pr_num}
:param pr_num: The PR number
:return: The name of the base branch if exists
"""
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
try:
url = 'https://api.github.com/repos/demisto/content/pulls/{}'.format(pr_num)
res = requests.get(url, verify=False)
if res.status_code != 200:
# If we didn't succeed to fetch the pr maybe it doesn't exist - then we don't want the build to fail
print_warning('Unable to fetch PR num {}'.format(pr_num))
return ''
response = res.json()
# The GitHub API seems to return a dict when there is only one PR, though this isn't mentioned in their API docs
if response and isinstance(response, dict):
base = response.get('base', {})
base_branch = base.get('ref')
if base_branch:
return base_branch
# GitHub usually returns a list of PRs
elif response and isinstance(response, list) and len(response) == 1:
pr = response[0]
base = pr.get('base', {})
base_branch = base.get('ref')
if base_branch:
return base_branch
except requests.exceptions.ConnectionError:
# If we didn't succeed to fetch the pr maybe it doesn't exist - then we don't want the build to fail
print_warning('Unable to fetch PR num {}'.format(pr_num))
return ''
return ''
|
def get_base_branch(pr_num):
"""
Fetches the base branch name of PR num {pr_num}
:param pr_num: The PR number
:return: The name of the base branch if exists
"""
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
try:
url = 'https://api.github.com/repos/demisto/content/pulls/{}'.format(pr_num)
res = requests.get(url, verify=False)
if res.status_code != 200:
# If we didn't succeed to fetch the pr maybe it doesn't exist - then we don't want the build to fail
print_warning('Unable to fetch PR num {}'.format(pr_num))
return ''
response = res.json()
# The GitHub API seems to return a dict when there is only one PR, though this isn't mentioned in their API docs
if response and isinstance(response, dict):
base = response.get('base', {})
return base.get('ref', '')
if base_branch:
return base_branch
# GitHub usually returns a list of PRs
elif response and isinstance(response, list) and len(response) == 1:
pr = response[0]
base = pr.get('base', {})
base_branch = base.get('ref')
if base_branch:
return base_branch
except requests.exceptions.ConnectionError:
# If we didn't succeed to fetch the pr maybe it doesn't exist - then we don't want the build to fail
print_warning('Unable to fetch PR num {}'.format(pr_num))
return ''
return ''
|
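For reference, the slice of the GitHub pull-request payload this function relies on looks roughly like the following (trimmed to the relevant keys, with illustrative values):
response = {
    "number": 1234,                       # illustrative
    "base": {"ref": "master"},            # branch the PR targets
    "head": {"ref": "feature/my-change"},
}
base_branch = response.get("base", {}).get("ref")  # -> "master"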
31,296 |
def main():
params = demisto.params()
args = demisto.args()
api_key = params.get('apikey')
base_url = params.get('base_url')
if base_url.endswith('/'):
base_url = base_url[:-1]
indicator_types = params.get('indicator_types')
max_fetch = params.get('max_indicator_to_fetch')
tlp_color = params.get('tlp_color')
if max_fetch:
max_fetch = int(max_fetch)
else:
max_fetch = 500
try:
client = OpenCTIApiClient(base_url, api_key, ssl_verify=params.get('insecure'), log_level='error')
command = demisto.command()
demisto.info(f"Command being called is {command}")
# Switch case
if command == "fetch-indicators":
indicators = fetch_indicators_command(client, indicator_types, max_fetch, tlp_color=tlp_color)
# we submit the indicators in batches
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif command == "test-module":
'''When setting up an OpenCTI client, it is checked that it is valid and allows requests to be sent;
if not, an error is returned immediately.'''
fetch_indicators_command(client, indicator_types, max_fetch, is_test=True)
return_outputs('ok')
elif command == "opencti-get-indicators":
return_results(get_indicators_command(client, args))
elif command == "opencti-indicator-delete":
return_results(indicator_delete_command(client, args))
elif command == "opencti-indicator-field-update":
return_results(indicator_field_update_command(client, args))
elif command == "opencti-indicator-create":
return_results(indicator_create_command(client, args))
elif command == "opencti-indicator-field-add":
return_results(indicator_field_add_command(client, args))
elif command == "opencti-indicator-field-remove":
return_results(indicator_field_remove_command(client, args))
elif command == "opencti-organization-list":
return_results(organization_list_command(client, args))
elif command == "opencti-organization-create":
return_results(organization_create_command(client, args))
except Exception as e:
return_error(f"Error [{e}]")
|
def main():
params = demisto.params()
args = demisto.args()
api_key = params.get('apikey')
base_url = params.get('base_url').strip('/')
indicator_types = params.get('indicator_types')
max_fetch = params.get('max_indicator_to_fetch')
tlp_color = params.get('tlp_color')
if max_fetch:
max_fetch = int(max_fetch)
else:
max_fetch = 500
try:
client = OpenCTIApiClient(base_url, api_key, ssl_verify=params.get('insecure'), log_level='error')
command = demisto.command()
demisto.info(f"Command being called is {command}")
# Switch case
if command == "fetch-indicators":
indicators = fetch_indicators_command(client, indicator_types, max_fetch, tlp_color=tlp_color)
# we submit the indicators in batches
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif command == "test-module":
'''When setting up an OpenCTI client, it is checked that it is valid and allows requests to be sent;
if not, an error is returned immediately.'''
fetch_indicators_command(client, indicator_types, max_fetch, is_test=True)
return_outputs('ok')
elif command == "opencti-get-indicators":
return_results(get_indicators_command(client, args))
elif command == "opencti-indicator-delete":
return_results(indicator_delete_command(client, args))
elif command == "opencti-indicator-field-update":
return_results(indicator_field_update_command(client, args))
elif command == "opencti-indicator-create":
return_results(indicator_create_command(client, args))
elif command == "opencti-indicator-field-add":
return_results(indicator_field_add_command(client, args))
elif command == "opencti-indicator-field-remove":
return_results(indicator_field_remove_command(client, args))
elif command == "opencti-organization-list":
return_results(organization_list_command(client, args))
elif command == "opencti-organization-create":
return_results(organization_create_command(client, args))
except Exception as e:
return_error(f"Error [{e}]")
|
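The long ``elif`` chain is a plain command dispatcher. As a sketch of the design choice only (not the integration's actual code), the commands that share the ``(client, args)`` signature could equally be driven by a name-to-handler mapping:
commands = {
    "opencti-get-indicators": get_indicators_command,
    "opencti-indicator-delete": indicator_delete_command,
    "opencti-indicator-create": indicator_create_command,
    # ... remaining handlers with the same signature
}
if command in commands:
    return_results(commands[command](client, args))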
1,522 |
def compute_optics_graph(X, min_samples, max_eps, metric, p, metric_params,
algorithm, leaf_size, n_jobs):
"""Computes the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : array, shape (n_samples, n_features), or (n_samples, n_samples) \
if metric='precomputed'.
A feature array, or array of distances between samples if
metric='precomputed'
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, optional (default=np.inf)
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : string or callable, optional (default='minkowski')
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:class:`sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array, shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array, shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array, shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array, shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
_validate_size(min_samples, n_samples, 'min_samples')
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
# Start all points as 'unprocessed' ##
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(n_neighbors=min_samples,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
p=p,
n_jobs=n_jobs)
nbrs.fit(X)
# Here we first do a kNN query for each point, this differs from
# the original OPTICS that only used epsilon range queries.
# TODO: handle working_memory somehow?
core_distances_ = _compute_core_distances_(X=X, neighbors=nbrs,
min_samples=min_samples,
working_memory=None)
# OPTICS puts an upper limit on these, use inf for undefined.
core_distances_[core_distances_ > max_eps] = np.inf
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
# This implementation is O(n lg n) theoretically.
Heap = []
for ordering_idx in range(X.shape[0]):
Heap.append((np.inf, ordering_idx))
heapq.heapify(Heap)
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
# Choose next based on smallest reachability distance
# (And prefer smaller ids on ties, possibly np.inf!)
(val, point) = heapq.heappop(Heap)
while processed[point]:
(val, point) = heapq.heappop(Heap)
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
_set_reach_dist(core_distances_=core_distances_,
reachability_=reachability_,
predecessor_=predecessor_,
point_index=point,
processed=processed, X=X, nbrs=nbrs,
metric=metric, metric_params=metric_params,
p=p, max_eps=max_eps, Heap=Heap)
if np.all(np.isinf(reachability_)):
warnings.warn("All reachability values are inf. Set a larger"
" max_eps or all data will be considered outliers.",
UserWarning)
return ordering, core_distances_, reachability_, predecessor_
|
def compute_optics_graph(X, min_samples, max_eps, metric, p, metric_params,
algorithm, leaf_size, n_jobs):
"""Computes the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : array, shape (n_samples, n_features), or (n_samples, n_samples) \
if metric='precomputed'.
A feature array, or array of distances between samples if
metric='precomputed'
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, optional (default=np.inf)
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : string or callable, optional (default='minkowski')
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:class:`sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array, shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array, shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array, shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array, shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
_validate_size(min_samples, n_samples, 'min_samples')
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
# Start all points as 'unprocessed' ##
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(n_neighbors=min_samples,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
p=p,
n_jobs=n_jobs)
nbrs.fit(X)
# Here we first do a kNN query for each point, this differs from
# the original OPTICS that only used epsilon range queries.
# TODO: handle working_memory somehow?
core_distances_ = _compute_core_distances_(X=X, neighbors=nbrs,
min_samples=min_samples,
working_memory=None)
# OPTICS puts an upper limit on these, use inf for undefined.
core_distances_[core_distances_ > max_eps] = np.inf
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
# This implementation is O(n lg n) theoretically.
Heap = []
for ordering_idx in range(X.shape[0]):
Heap.append((np.inf, ordering_idx))
heapq.heapify(Heap)
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
# Choose next based on smallest reachability distance
# (And prefer smaller ids on ties, possibly np.inf!)
val, point = heapq.heappop(Heap)
while processed[point]:
(val, point) = heapq.heappop(Heap)
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
_set_reach_dist(core_distances_=core_distances_,
reachability_=reachability_,
predecessor_=predecessor_,
point_index=point,
processed=processed, X=X, nbrs=nbrs,
metric=metric, metric_params=metric_params,
p=p, max_eps=max_eps, Heap=Heap)
if np.all(np.isinf(reachability_)):
warnings.warn("All reachability values are inf. Set a larger"
" max_eps or all data will be considered outliers.",
UserWarning)
return ordering, core_distances_, reachability_, predecessor_
|
53,309 |
def find_ion_saturation_current(
voltage: np.ndarray,
current: np.ndarray,
*,
fit_type: str = "exp_plus_linear",
current_bound: float = None,
voltage_bound: float = None,
) -> Tuple[ffuncs.Linear, ISatExtras]:
"""
Determines the ion-saturation current (:math:`I_{sat}`) for a given
current-voltage (IV) curve obtained from a swept Langmuir probe.
The current collected by a Langmuir probe reaches ion-saturation
when the probe is sufficiently biased so the influx of electrons is
completely repelled leading to only the collection of ions. (For
additional details see the **Notes** section below.)
**Aliases:** `find_isat_`
Parameters
----------
voltage: `numpy.ndarray`
1-D numpy array of monotonically increasing probe biases
(should be in volts).
current: `numpy.ndarray`
1-D numpy array of probe current (should be in amperes)
corresponding to the ``voltage`` array.
fit_type: `str`
The type of curve (:term:`fit-function`) to be fitted to the
Langmuir trace, valid options are listed below.
(DEFAULT ``"exp_plus_linear"``)
+-----------------------+----------------------------------------------------------+
| ``"linear"`` | `~plasmapy.analysis.fit_functions.Linear` |
+-----------------------+----------------------------------------------------------+
| ``"exponential"`` | `~plasmapy.analysis.fit_functions.ExponentialPlusOffset` |
+-----------------------+----------------------------------------------------------+
| ``"exp_plus_linear"`` | `~plasmapy.analysis.fit_functions.ExponentialPlusLinear` |
+-----------------------+----------------------------------------------------------+
current_bound: `float`
A fraction representing a percentile window around the minimum
current for which to collect the points. For example, a value
of ``0.1`` indicates to use all points within 10% of the
minimum current. (DEFAULT ``None``)
|
If neither ``current_bound`` nor ``voltage_bound`` is specified,
then the routine will collect indices based on an internal
``current_bound`` setting for the specified ``fit_type``.
+-----------------------+--------------------------------------+
| ``"linear"`` | 0.4 |
+-----------------------+--------------------------------------+
| ``"exponential"`` | 1.0 |
+-----------------------+--------------------------------------+
| ``"exp_plus_linear"`` | 1.0 |
+-----------------------+--------------------------------------+
voltage_bound: `float`
A bias voltage (in volts) that specifies an upper bound used to
collect the points for the curve fit. That is, points that
satisfy ``voltage <= voltage_bound`` are used in the fit.
(DEFAULT ``None``)
Returns
-------
isat: `~plasmapy.analysis.fit_functions.Linear`
A :term:`fit-function` representing the linear portion of the
fitted curve.
extras: `ISatExtras`
Additional information from the curve fit:
* ``extras.fitted_func`` is the :term:`fit-function` (specified
by ``fit_type``) fitted to the IV-curve
* ``extras.rsq`` is the coefficient of determination
(r-squared) value of the ``extras.fitted_func`` to the IV-curve
* ``extras.fitted_indices`` is a `slice` object representing the
points used in the curve fit (i.e.
``(voltage[extras.fitted_indices], current[extras.fitted_indices])``).
Notes
-----
This routine works by:
1. Selecting the points to be used in the fit as determined by
``voltage_bound`` or ``current_bound``.
2. Fitting the selected points with the :term:`fit-function`
specified by ``fit_type``.
3. Extracting the linear component of the fit and returning that as
the ion-saturation current.
This routine opts to return a function representing a linear
ion-saturation current, since, while ideal planar Langmuir probes
reach a steady-state ion-saturation current, real world Langmuir
probes "suffer" from expanding sheaths as the bias voltage
increases. This sheath expansion results in the ion-saturation
current also increasing.
"""
rtn_extras = ISatExtras(rsq=None, fitted_func=None, fitted_indices=None)._asdict()
_settings = {
"linear": {
"func": ffuncs.Linear,
"current_bound": 0.4,
},
"exp_plus_linear": {
"func": ffuncs.ExponentialPlusLinear,
"current_bound": 1.0,
},
"exp_plus_offset": {
"func": ffuncs.ExponentialPlusOffset,
"current_bound": 1.0,
},
}
try:
default_current_bound = _settings[fit_type]["current_bound"]
fit_func = _settings[fit_type]["func"]()
rtn_extras["fitted_func"] = fit_func
except KeyError:
raise ValueError(
f"Requested fit '{fit_type}' is not a valid option. Valid options "
f"are {list(_settings.keys())}."
)
# check voltage and current arrays
voltage, current = check_sweep(voltage, current, strip_units=True)
# condition kwargs voltage_bound and current_bound
if voltage_bound is None and current_bound is None:
current_bound = default_current_bound
elif voltage_bound is not None and current_bound is not None:
raise ValueError(
"Both keywords 'current_bound' and `voltage_bound' are specified, "
"use only one."
)
if current_bound is not None:
if not isinstance(current_bound, numbers.Real):
raise TypeError(
f"Keyword 'current_bound' is of type {type(current_bound)}, "
f"expected an int or float."
)
current_min = current.min()
current_bound = (1.0 - current_bound) * current_min
mask = np.where(current <= current_bound)[0]
else: # voltage_bound is not None
if not isinstance(voltage_bound, numbers.Real):
raise TypeError(
f"Keyword 'voltage_bound' is of type {type(voltage_bound)}, "
f"expected an int or float."
)
mask = np.where(voltage <= voltage_bound)[0]
if mask.size == 0:
raise ValueError(
f"The specified bounding keywords, 'voltage_bound' "
f"({voltage_bound}) and 'current_bound' ({current_bound}), "
f"resulted in a fit window containing no points."
)
mask = slice(0, mask[-1] + 1)
rtn_extras["fitted_indices"] = mask
volt_sub = voltage[mask]
curr_sub = current[mask]
fit_func.curve_fit(volt_sub, curr_sub)
rtn_extras["rsq"] = fit_func.rsq
m = getattr(fit_func.params, "m", 0.0)
b = getattr(fit_func.params, "b", 0.0)
m_err = getattr(fit_func.param_errors, "m", 0.0)
b_err = getattr(fit_func.param_errors, "b", 0.0)
isat = ffuncs.Linear(params=(m, b), param_errors=(m_err, b_err))
return isat, ISatExtras(**rtn_extras)
|
def find_ion_saturation_current(
voltage: np.ndarray,
current: np.ndarray,
*,
fit_type: str = "exp_plus_linear",
current_bound: float = None,
voltage_bound: float = None,
) -> Tuple[ffuncs.Linear, ISatExtras]:
"""
Determines the ion-saturation current (:math:`I_{sat}`) for a given
current-voltage (IV) curve obtained from a swept Langmuir probe.
The current collected by a Langmuir probe reaches ion-saturation
when the probe is sufficiently biased so the influx of electrons is
completely repelled, which leads to only the collection of ions. (For
additional details see the **Notes** section below.)
**Aliases:** `find_isat_`
Parameters
----------
voltage: `numpy.ndarray`
1-D numpy array of monotonically increasing probe biases
(should be in volts).
current: `numpy.ndarray`
1-D numpy array of probe current (should be in amperes)
corresponding to the ``voltage`` array.
fit_type: `str`
The type of curve (:term:`fit-function`) to be fitted to the
Langmuir trace, valid options are listed below.
(DEFAULT ``"exp_plus_linear"``)
+-----------------------+----------------------------------------------------------+
| ``"linear"`` | `~plasmapy.analysis.fit_functions.Linear` |
+-----------------------+----------------------------------------------------------+
| ``"exponential"`` | `~plasmapy.analysis.fit_functions.ExponentialPlusOffset` |
+-----------------------+----------------------------------------------------------+
| ``"exp_plus_linear"`` | `~plasmapy.analysis.fit_functions.ExponentialPlusLinear` |
+-----------------------+----------------------------------------------------------+
current_bound: `float`
A fraction representing a percentile window around the minimum
current for which to collect the points. For example, a value
of ``0.1`` indicates to use all points within 10% of the
minimum current. (DEFAULT ``None``)
|
If neither ``current_bound`` nor ``voltage_bound`` is specified,
then the routine will collect indices based on an internal
``current_bound`` setting for the specified ``fit_type``.
+-----------------------+--------------------------------------+
| ``"linear"`` | 0.4 |
+-----------------------+--------------------------------------+
| ``"exponential"`` | 1.0 |
+-----------------------+--------------------------------------+
| ``"exp_plus_linear"`` | 1.0 |
+-----------------------+--------------------------------------+
voltage_bound: `float`
A bias voltage (in volts) that specifies an upper bound used to
collect the points for the curve fit. That is, points that
satisfy ``voltage <= voltage_bound`` are used in the fit.
(DEFAULT ``None``)
Returns
-------
isat: `~plasmapy.analysis.fit_functions.Linear`
A :term:`fit-function` representing the linear portion of the
fitted curve.
extras: `ISatExtras`
Additional information from the curve fit:
* ``extras.fitted_func`` is the :term:`fit-function` (specified
by ``fit_type``) fitted to the IV-curve
* ``extras.rsq`` is the coefficient of determination
(r-squared) value of the ``extras.fitted_func`` to the IV-curve
* ``extras.fitted_indices`` is a `slice` object representing the
points used in the curve fit (i.e.
``(voltage[extras.fitted_indices], current[extras.fitted_indices])``).
Notes
-----
This routine works by:
1. Selecting the points to be used in the fit as determined by
``voltage_bound`` or ``current_bound``.
2. Fitting the selected points with the :term:`fit-function`
specified by ``fit_type``.
3. Extracting the linear component of the fit and returning that as
the ion-saturation current.
This routine opts to return a function representing a linear
ion-saturation current, since, while ideal planar Langmuir probes
reach a steady-state ion-saturation current, real world Langmuir
probes "suffer" from expanding sheaths as the bias voltage
increases. This sheath expansion results in the ion-saturation
current also increasing.
"""
rtn_extras = ISatExtras(rsq=None, fitted_func=None, fitted_indices=None)._asdict()
_settings = {
"linear": {
"func": ffuncs.Linear,
"current_bound": 0.4,
},
"exp_plus_linear": {
"func": ffuncs.ExponentialPlusLinear,
"current_bound": 1.0,
},
"exp_plus_offset": {
"func": ffuncs.ExponentialPlusOffset,
"current_bound": 1.0,
},
}
try:
default_current_bound = _settings[fit_type]["current_bound"]
fit_func = _settings[fit_type]["func"]()
rtn_extras["fitted_func"] = fit_func
except KeyError:
raise ValueError(
f"Requested fit '{fit_type}' is not a valid option. Valid options "
f"are {list(_settings.keys())}."
)
# check voltage and current arrays
voltage, current = check_sweep(voltage, current, strip_units=True)
# condition kwargs voltage_bound and current_bound
if voltage_bound is None and current_bound is None:
current_bound = default_current_bound
elif voltage_bound is not None and current_bound is not None:
raise ValueError(
"Both keywords 'current_bound' and `voltage_bound' are specified, "
"use only one."
)
if current_bound is not None:
if not isinstance(current_bound, numbers.Real):
raise TypeError(
f"Keyword 'current_bound' is of type {type(current_bound)}, "
f"expected an int or float."
)
current_min = current.min()
current_bound = (1.0 - current_bound) * current_min
mask = np.where(current <= current_bound)[0]
else: # voltage_bound is not None
if not isinstance(voltage_bound, numbers.Real):
raise TypeError(
f"Keyword 'voltage_bound' is of type {type(voltage_bound)}, "
f"expected an int or float."
)
mask = np.where(voltage <= voltage_bound)[0]
if mask.size == 0:
raise ValueError(
f"The specified bounding keywords, 'voltage_bound' "
f"({voltage_bound}) and 'current_bound' ({current_bound}), "
f"resulted in a fit window containing no points."
)
mask = slice(0, mask[-1] + 1)
rtn_extras["fitted_indices"] = mask
volt_sub = voltage[mask]
curr_sub = current[mask]
fit_func.curve_fit(volt_sub, curr_sub)
rtn_extras["rsq"] = fit_func.rsq
m = getattr(fit_func.params, "m", 0.0)
b = getattr(fit_func.params, "b", 0.0)
m_err = getattr(fit_func.param_errors, "m", 0.0)
b_err = getattr(fit_func.param_errors, "b", 0.0)
isat = ffuncs.Linear(params=(m, b), param_errors=(m_err, b_err))
return isat, ISatExtras(**rtn_extras)
|
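A worked example of how ``current_bound`` selects the fit window, with made-up numbers: for a minimum current of -4.0 mA and ``current_bound=0.5``, the threshold is ``(1 - 0.5) * (-4.0e-3) = -2.0e-3``, so only samples at or below -2.0 mA (within 50% of the minimum) are kept.
import numpy as np
current = np.array([-4.0e-3, -3.0e-3, -1.0e-3, 2.0e-3])  # illustrative trace, amperes
threshold = (1.0 - 0.5) * current.min()                   # -2.0e-3
mask = np.where(current <= threshold)[0]                  # -> indices [0, 1]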
39,148 |
def wavernn(checkpoint_name: str) -> WaveRNN:
r"""Get pretrained WaveRNN model.
Args:
checkpoint_name (str): The name of the checkpoint to load. Available checkpoints:
- ``"wavernn_10k_epochs_8bits_ljspeech"``:
WaveRNN model trained with 10k epochs and 8 bits depth waveform on the LJSpeech dataset.
The model is trained using the default parameters and code of the
`examples/pipeline_wavernn/main.py
<https://github.com/pytorch/audio/tree/master/examples/pipeline_wavernn>`_.
"""
if checkpoint_name not in _MODEL_CONFIG_AND_URLS:
raise ValueError(
f"Unexpected checkpoint_name: '{checkpoint_name}'. "
f"Valid choices are; {list(_MODEL_CONFIG_AND_URLS.keys())}")
url, configs = _MODEL_CONFIG_AND_URLS[checkpoint_name]
model = WaveRNN(**configs)
state_dict = load_state_dict_from_url(url, progress=False)
model.load_state_dict(state_dict)
return model
|
def wavernn(checkpoint_name: str) -> WaveRNN:
r"""Get pretrained WaveRNN model.
Args:
return torch.stack(output).permute(1, 2, 0)
- ``"wavernn_10k_epochs_8bits_ljspeech"``:
WaveRNN model trained with 10k epochs and 8 bits depth waveform on the LJSpeech dataset.
The model is trained using the default parameters and code of the
`examples/pipeline_wavernn/main.py
<https://github.com/pytorch/audio/tree/master/examples/pipeline_wavernn>`_.
"""
if checkpoint_name not in _MODEL_CONFIG_AND_URLS:
raise ValueError(
f"Unexpected checkpoint_name: '{checkpoint_name}'. "
f"Valid choices are; {list(_MODEL_CONFIG_AND_URLS.keys())}")
url, configs = _MODEL_CONFIG_AND_URLS[checkpoint_name]
model = WaveRNN(**configs)
state_dict = load_state_dict_from_url(url, progress=False)
model.load_state_dict(state_dict)
return model
|
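A usage sketch, assuming the checkpoint name listed in the docstring is present in the configured URL table:
model = wavernn("wavernn_10k_epochs_8bits_ljspeech")
model.eval()   # weights are already loaded; switch to inference mode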
41,367 |
def lazy_read_iiasa(
file, name, default=True, meta=True, creds=None, base_url=_AUTH_URL, **kwargs
):
"""Check if the file in a given location is an up-to-date version of an IIASA
database. If so, load it. If not, load data from IIASA scenario explorer and
save to that location. Does not check that the previously read version is a complete
instance of the database.
Parameters
----------
file : str
The location to test for valid data and save the data if not up-to-date.
name : str
A valid name of an IIASA scenario explorer instance,
see :attr:`pyam.iiasa.Connection().valid_connections`
default : bool, optional
Return *only* the default version of each scenario.
Any (`model`, `scenario`) without a default version is omitted.
If :obj:`False`, return all versions.
meta : bool or list of strings, optional
If `True`, include all meta categories & quantitative indicators
(or subset if list is given).
creds : str, :class:`pathlib.Path`, list-like, or dict, optional
| Credentials (username & password) are not required to access
any public Scenario Explorer instances (i.e., with Guest login).
| See :class:`pyam.iiasa.Connection` for details.
| Use :meth:`pyam.iiasa.set_config` to set credentials
for accessing private/restricted Scenario Explorer instances.
base_url : str
Authentication server URL
kwargs
Arguments for :meth:`pyam.iiasa.Connection.query`
"""
assert file[-4:] == ".csv", "We will only read and write to csv format."
if os.path.exists(file):
date_set = pd.to_datetime(os.path.getmtime(file), unit="s")
version_info = Connection(name, creds, base_url).properties()
latest_new = np.nanmax(pd.to_datetime(version_info["create_date"]))
latest_update = np.nanmax(pd.to_datetime(version_info["update_date"]))
latest = pd.Series([latest_new, latest_update]).max()
if latest < date_set:
old_read = IamDataFrame(file)
if kwargs:
old_read = old_read.filter(**kwargs)
print("Database read from file")
return old_read
else:
print("Database out of date and will be re-downloaded")
# If we get here, we need to redownload the database
new_read = read_iiasa(
        name, meta=meta, default=default, creds=creds, base_url=base_url, **kwargs
)
Path(file).parent.mkdir(parents=True, exist_ok=True)
new_read.to_csv(file)
return new_read
|
def lazy_read_iiasa(
file, name, default=True, meta=True, creds=None, base_url=_AUTH_URL, **kwargs
):
"""Check if the file in a given location is an up-to-date version of an IIASA
database. If so, load it. If not, load data from IIASA scenario explorer and
save to that location. Does not check that the previously read version is a complete
instance of the database.
Parameters
----------
file : str
The location to test for valid data and save the data if not up-to-date.
name : str
A valid name of an IIASA scenario explorer instance,
see :attr:`pyam.iiasa.Connection().valid_connections`
default : bool, optional
Return *only* the default version of each scenario.
Any (`model`, `scenario`) without a default version is omitted.
If :obj:`False`, return all versions.
meta : bool or list of strings, optional
If `True`, include all meta categories & quantitative indicators
(or subset if list is given).
creds : str or :class:`pathlib.Path`, optional
| Credentials (username & password) are not required to access
any public Scenario Explorer instances (i.e., with Guest login).
| See :class:`pyam.iiasa.Connection` for details.
| Use :meth:`pyam.iiasa.set_config` to set credentials
for accessing private/restricted Scenario Explorer instances.
base_url : str
Authentication server URL
kwargs
Arguments for :meth:`pyam.iiasa.Connection.query`
"""
assert file[-4:] == ".csv", "We will only read and write to csv format."
if os.path.exists(file):
date_set = pd.to_datetime(os.path.getmtime(file), unit="s")
version_info = Connection(name, creds, base_url).properties()
latest_new = np.nanmax(pd.to_datetime(version_info["create_date"]))
latest_update = np.nanmax(pd.to_datetime(version_info["update_date"]))
latest = pd.Series([latest_new, latest_update]).max()
if latest < date_set:
old_read = IamDataFrame(file)
if kwargs:
old_read = old_read.filter(**kwargs)
print("Database read from file")
return old_read
else:
print("Database out of date and will be re-downloaded")
# If we get here, we need to redownload the database
new_read = read_iiasa(
        name, meta=meta, default=default, creds=creds, base_url=base_url, **kwargs
)
Path(file).parent.mkdir(parents=True, exist_ok=True)
new_read.to_csv(file)
return new_read
|
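At its core, lazy_read_iiasa is a cache-freshness check: compare the local file's modification time with the newest create/update timestamp reported by the server, and only re-download when the server copy is newer. A stripped-down sketch of just that check, with a hypothetical remote_timestamps() standing in for Connection(...).properties():

import os
from pathlib import Path

import pandas as pd

def remote_timestamps():
    """Hypothetical stand-in for the scenario explorer's create/update dates."""
    return pd.to_datetime(["2021-01-01", "2021-06-01"])

def is_cache_fresh(file) -> bool:
    """True if the local csv is newer than anything reported by the server."""
    if not os.path.exists(file):
        return False
    local_mtime = pd.to_datetime(os.path.getmtime(file), unit="s")
    return remote_timestamps().max() < local_mtime

cache = Path("scenario_cache.csv")
if is_cache_fresh(cache):
    print("Database read from file")
else:
    print("Database out of date and will be re-downloaded")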
10,416 |
def main():
argument_spec = dict(
resource=dict(required=True),
tags=dict(type='dict'),
purge_tags=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent', 'list']),
)
required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
resource = module.params['resource']
tags = module.params['tags']
state = module.params['state']
purge_tags = module.params['purge_tags']
result = {'changed': False}
ec2 = module.client('ec2')
current_tags = get_tags(ec2, module, resource)
if state == 'list':
module.deprecate(
'Using the "list" state has been deprecated. Please use the ec2_tag_infoi module instead', version='2.14')
module.exit_json(changed=False, tags=current_tags)
add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
remove_tags = {}
if state == 'absent':
for key in tags:
if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
remove_tags[key] = current_tags[key]
for key in remove:
remove_tags[key] = current_tags[key]
if remove_tags:
result['changed'] = True
result['removed_tags'] = remove_tags
if not module.check_mode:
try:
ec2.delete_tags(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(remove_tags))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
if state == 'present' and add_tags:
result['changed'] = True
result['added_tags'] = add_tags
current_tags.update(add_tags)
if not module.check_mode:
try:
ec2.create_tags(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(add_tags))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
result['tags'] = get_tags(ec2, module, resource)
module.exit_json(**result)
|
def main():
argument_spec = dict(
resource=dict(required=True),
tags=dict(type='dict'),
purge_tags=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent', 'list']),
)
required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
resource = module.params['resource']
tags = module.params['tags']
state = module.params['state']
purge_tags = module.params['purge_tags']
result = {'changed': False}
ec2 = module.client('ec2')
current_tags = get_tags(ec2, module, resource)
if state == 'list':
module.deprecate(
'Using the "list" state has been deprecated. Please use the ec2_tag_info module instead', version='2.14')
module.exit_json(changed=False, tags=current_tags)
add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
remove_tags = {}
if state == 'absent':
for key in tags:
if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
remove_tags[key] = current_tags[key]
for key in remove:
remove_tags[key] = current_tags[key]
if remove_tags:
result['changed'] = True
result['removed_tags'] = remove_tags
if not module.check_mode:
try:
ec2.delete_tags(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(remove_tags))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
if state == 'present' and add_tags:
result['changed'] = True
result['added_tags'] = add_tags
current_tags.update(add_tags)
if not module.check_mode:
try:
ec2.create_tags(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(add_tags))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
result['tags'] = get_tags(ec2, module, resource)
module.exit_json(**result)
|
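The tagging module above boils down to a diff between current and desired tags: compare_aws_tags yields the keys to add or update and, when purge_tags is set, the keys to drop, which are then applied with create_tags/delete_tags. Below is a simplified, boto-free sketch of that diff step (not the real compare_aws_tags, just the same idea with plain dicts and made-up tag values):

def diff_tags(current, desired, purge=False):
    """Return (to_add, to_remove) in the spirit of the add/remove split above."""
    to_add = {k: v for k, v in desired.items() if current.get(k) != v}
    to_remove = [k for k in current if purge and k not in desired]
    return to_add, to_remove

current = {"Name": "web-1", "Env": "staging", "Owner": "ops"}
desired = {"Name": "web-1", "Env": "prod"}

add, remove = diff_tags(current, desired, purge=True)
print(add)     # {'Env': 'prod'}
print(remove)  # ['Owner']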
35,279 |
def non_negative_parafac_hals(tensor, rank, n_iter_max=100, init="svd", svd='numpy_svd', tol=1e-7,
sparsity_coefficients=[], fixed_modes=[],hals='approx',
verbose=False, return_errors=False):
"""
Non-negative CP decomposition
Uses HALS which updates each factor columnwise, fixing every other columns, see [1]_
Parameters
----------
tensor : ndarray
rank : int
number of components
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'numpy_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
        Default: 1e-7
sparsity_coefficients: array of float (of length the number of modes)
The sparsity coefficients on each factor.
If set to None, the algorithm is computed without sparsity
Default: [],
fixed_modes: array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: []
verbose: boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
return_errors: boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
toc: list
        A list with accumulated time at each iteration
References
----------
[1]: N. Gillis and F. Glineur, Accelerated Multiplicative Updates and
Hierarchical ALS Algorithms for Nonnegative Matrix Factorization,
Neural Computation 24 (4): 1085-1105, 2012.
"""
weights, factors = initialize_nn_cp(tensor, rank, init=init, svd=svd,
random_state=None,
normalize_factors=False)
norm_tensor = tl.norm(tensor, 2)
nb_modes = len(tensor.shape)
    if sparsity_coefficients is None or len(sparsity_coefficients) != nb_modes:
#print(
# "Irrelevant number of sparsity coefficient (different from the number of modes), they have been set to None.")
sparsity_coefficients = [None for i in range(nb_modes)]
    if fixed_modes is None:
fixed_modes = []
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
# Generating the mode update sequence
modes_list = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
    # initialisation - declare local variables
rec_errors = []
    # Iteration
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes_list:
# Computing Hadamard of cross-products
pseudo_inverse = tl.tensor(tl.ones((rank, rank)), **tl.context(tensor))
for i, factor in enumerate(factors):
if i != mode:
pseudo_inverse = pseudo_inverse*tl.dot(tl.transpose(factor), factor)
if not iteration and weights is not None:
# Take into account init weights
mttkrp = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
else:
mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)
# Call the hals resolution with nnls, optimizing the current mode
if hals=='approx':
factors[mode] = tl.transpose(
hals_nnls_approx(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=100,sparsity_coefficient=sparsity_coefficients[mode])[0])
elif hals=='exact':
factors[mode] = tl.transpose(
hals_nnls_exact(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=5000)[0])
if tol:
factors_norm = cp_norm((weights, factors))
iprod = tl.sum(tl.sum(mttkrp*factor, axis=0)*weights)
rec_error = tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2*iprod)) / norm_tensor
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print('reconstruction error={}, variation={}.'.format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print('converged in {} iterations.'.format(iteration))
break
cp_tensor = CPTensor((weights, factors))
if return_errors:
return cp_tensor, rec_errors
else:
return cp_tensor
|
def non_negative_parafac_hals(tensor, rank, n_iter_max=100, init="svd", svd='numpy_svd', tol=1e-7,
sparsity_coefficients=[], fixed_modes=[],hals='approx',
verbose=False, return_errors=False):
"""
Non-negative CP decomposition via HALS
Uses HALS which updates each factor columnwise, fixing every other columns, see [1]_
Parameters
----------
tensor : ndarray
rank : int
number of components
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'numpy_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
        Default: 1e-7
sparsity_coefficients: array of float (of length the number of modes)
The sparsity coefficients on each factor.
If set to None, the algorithm is computed without sparsity
Default: [],
fixed_modes: array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: []
verbose: boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
return_errors: boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
toc: list
        A list with accumulated time at each iteration
References
----------
[1]: N. Gillis and F. Glineur, Accelerated Multiplicative Updates and
Hierarchical ALS Algorithms for Nonnegative Matrix Factorization,
Neural Computation 24 (4): 1085-1105, 2012.
"""
weights, factors = initialize_nn_cp(tensor, rank, init=init, svd=svd,
random_state=None,
normalize_factors=False)
norm_tensor = tl.norm(tensor, 2)
nb_modes = len(tensor.shape)
    if sparsity_coefficients is None or len(sparsity_coefficients) != nb_modes:
#print(
# "Irrelevant number of sparsity coefficient (different from the number of modes), they have been set to None.")
sparsity_coefficients = [None for i in range(nb_modes)]
    if fixed_modes is None:
fixed_modes = []
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
# Generating the mode update sequence
modes_list = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
    # initialisation - declare local variables
rec_errors = []
    # Iteration
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes_list:
# Computing Hadamard of cross-products
pseudo_inverse = tl.tensor(tl.ones((rank, rank)), **tl.context(tensor))
for i, factor in enumerate(factors):
if i != mode:
pseudo_inverse = pseudo_inverse*tl.dot(tl.transpose(factor), factor)
if not iteration and weights is not None:
# Take into account init weights
mttkrp = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
else:
mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)
# Call the hals resolution with nnls, optimizing the current mode
if hals=='approx':
factors[mode] = tl.transpose(
hals_nnls_approx(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=100,sparsity_coefficient=sparsity_coefficients[mode])[0])
elif hals=='exact':
factors[mode] = tl.transpose(
hals_nnls_exact(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=5000)[0])
if tol:
factors_norm = cp_norm((weights, factors))
iprod = tl.sum(tl.sum(mttkrp*factor, axis=0)*weights)
rec_error = tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2*iprod)) / norm_tensor
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print('reconstruction error={}, variation={}.'.format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print('converged in {} iterations.'.format(iteration))
break
cp_tensor = CPTensor((weights, factors))
if return_errors:
return cp_tensor, rec_errors
else:
return cp_tensor
|
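The docstring above credits the columnwise HALS updates of Gillis & Glineur. The tensor routine applies them mode by mode through hals_nnls_*, but the underlying idea is easiest to see in the matrix (NMF) case: update one column of W (or one row of H) in closed form, clip at zero, repeat. A small NumPy-only sketch of that matrix case, purely illustrative and independent of the tensorly code above:

import numpy as np

def hals_nmf(X, rank, n_iter=200, eps=1e-12):
    """Tiny HALS NMF sketch: closed-form columnwise updates with clipping."""
    m, n = X.shape
    rng = np.random.default_rng(0)
    W = rng.random((m, rank))
    H = rng.random((rank, n))
    for _ in range(n_iter):
        # Update columns of W with H fixed.
        XHt, HHt = X @ H.T, H @ H.T
        for k in range(rank):
            num = XHt[:, k] - W @ HHt[:, k] + W[:, k] * HHt[k, k]
            W[:, k] = np.maximum(num / max(HHt[k, k], eps), 0.0)
        # Symmetric update for the rows of H with W fixed.
        WtX, WtW = W.T @ X, W.T @ W
        for k in range(rank):
            num = WtX[k, :] - WtW[k, :] @ H + WtW[k, k] * H[k, :]
            H[k, :] = np.maximum(num / max(WtW[k, k], eps), 0.0)
    return W, H

X = np.abs(np.random.default_rng(1).random((20, 15)))
W, H = hals_nmf(X, rank=3)
print(np.linalg.norm(X - W @ H) / np.linalg.norm(X))  # relative error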
31,790 |
def determine_correct_format(time, fmt):
time = datetime.datetime.strptime(time, fmt)
return time
|
def determine_correct_format(time, fmt):
time = datetime.strptime(time, fmt)
return time
|
10,303 |
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
host_file_list.append("/etc/openssh/ssh_known_hosts")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
with open(hf) as host_fh:
data = host_fh.readlines()
except IOError:
hfiles_not_found += 1
continue
for line in data:
if line is None or " " not in line:
continue
tokens = line.split()
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except Exception:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
return True
|
def not_in_host_file(self, host):
if 'USER' in os.environ:
user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
else:
user_host_file = "~/.ssh/known_hosts"
user_host_file = os.path.expanduser(user_host_file)
host_file_list = []
host_file_list.append(user_host_file)
host_file_list.append("/etc/ssh/ssh_known_hosts")
host_file_list.append("/etc/ssh/ssh_known_hosts2")
host_file_list.append("/etc/openssh/ssh_known_hosts")
hfiles_not_found = 0
for hf in host_file_list:
if not os.path.exists(hf):
hfiles_not_found += 1
continue
try:
with open(hf) as host_fh:
data = host_fh.readlines()
except IOError:
hfiles_not_found += 1
continue
for line in data.split("\n"):
if line is None or " " not in line:
continue
tokens = line.split()
if tokens[0].find(HASHED_KEY_MAGIC) == 0:
# this is a hashed known host entry
try:
(kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
hash.update(host)
if hash.digest() == kn_host.decode('base64'):
return False
except Exception:
# invalid hashed host key, skip it
continue
else:
# standard host file entry
if host in tokens[0]:
return False
return True
|
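The interesting branch in not_in_host_file handles hashed known_hosts entries of the form '|1|<base64 salt>|<base64 hash>', where the hash is HMAC-SHA1 of the hostname keyed with the salt; the code above still uses the Python 2 str.decode('base64') idiom. A Python 3 sketch of that check in isolation (the sample salt and hostname are made up):

import base64
import hashlib
import hmac

HASHED_KEY_MAGIC = "|1|"

def host_matches_hashed_entry(host: str, hashed_field: str) -> bool:
    """Check a '|1|salt|hash' known_hosts token against a hostname."""
    kn_salt_b64, kn_host_b64 = hashed_field[len(HASHED_KEY_MAGIC):].split("|", 2)
    salt = base64.b64decode(kn_salt_b64)
    expected = base64.b64decode(kn_host_b64)
    digest = hmac.new(salt, host.encode(), hashlib.sha1).digest()
    return hmac.compare_digest(digest, expected)

# Build a matching entry for demonstration, then verify it round-trips.
salt = b"0123456789abcdef0123"
mac = hmac.new(salt, b"example.com", hashlib.sha1).digest()
entry = "|1|%s|%s" % (base64.b64encode(salt).decode(), base64.b64encode(mac).decode())
print(host_matches_hashed_entry("example.com", entry))  # True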
40,526 |
def show_extension_version():
print(f'Current version: 0.2.0')
|
def show_extension_version():
print('Current version: 0.2.0')
|
33,776 |
def test_error_serialization(ray_start_regular_shared):
"""Test that errors will be serialized properly."""
with pytest.raises(PermissionError):
with ray_start_client_server() as ray:
@ray.remote
def g():
with open("/dev/asdf", "w") as f:
f.write("HI")
# grpc error
ray.get(g.remote())
|
def test_error_serialization(ray_start_regular_shared):
"""Test that errors will be serialized properly."""
with pytest.raises(PermissionError):
with ray_start_client_server() as ray:
@ray.remote
def g():
with open("/dev/asdf", "w") as f:
f.write("HI")
# Raises a PermissionError
ray.get(g.remote())
|
4,116 |
def p_c_base_type(s, nonempty = 0, templates = None):
if s.sy == '(':
return p_c_complex_base_type(s, templates = templates)
else:
return p_c_simple_base_type(s, nonempty = nonempty, templates = templates)
|
def p_c_base_type(s, nonempty = 0, templates = None):
if s.sy == '(':
return p_c_complex_base_type(s, templates = templates)
else:
return p_c_simple_base_type(s, nonempty=nonempty, templates=templates)
|
32,169 |
def get_event_types(client, method, token):
"""
Call the client module to fetch event types using the input parameters
    :param client: instance of the client to communicate with the server
:param method: Requests method to be used
:param token: server access token
:return: alert event types
"""
eTypeAlias = {}
params = {
'token': token
}
eventtypes_url = r'/api/v2/events/types'
eventTypes = client.get_event_types(method, eventtypes_url, params)
    if eventTypes is not None:
for eachone in eventTypes:
eTypeAlias[eachone['type']] = eachone['alias']
return eTypeAlias
|
def get_event_types(client, method, token):
"""
Call the client module to fetch event types using the input parameters
    :param client: instance of the client to communicate with the server
:param method: Requests method to be used
:param token: server access token
:return: alert event types
"""
eTypeAlias = {}
params = {
'token': token
}
eventtypes_url = r'/api/v2/events/types'
eventTypes = client.get_event_types(method, eventtypes_url, params)
    if eventTypes is not None:
for eachone in eventTypes:
eTypeAlias[eachone['type']] = eachone.get('alias')
return eTypeAlias
|
795 |
def finite_check(f, x):
def check_fx(exprs, x):
return x not in exprs.free_symbols
def check_sincos(expr, x):
if type(expr) == sin or type(expr) == cos:
a = Wild('a', properties=[lambda k: x not in k.free_symbols, ])
b = Wild('b', properties=[lambda k: x not in k.free_symbols or k == S.Zero, ])
sincos_args = expr.args[0]
if sincos_args.match(a * x + b) is not None:
return True
else:
return False
expr = sincos_to_sum(TR2(TR1(f)))
res_expr = S.Zero
add_coeff = expr.as_coeff_add()
res_expr += add_coeff[0]
for s in add_coeff[1]:
mul_coeffs = s.as_coeff_mul()[1]
for t in mul_coeffs:
if not(check_fx(t, x) or check_sincos(t, x)):
return False, f
res_expr += TR10(s)
a = Wild('a', properties=[lambda k:k.is_Integer, lambda k:k != 0, ])
return True, res_expr.collect([sin(a*x), cos(a*x)])
|
def finite_check(f, x):
def check_fx(exprs, x):
return x not in exprs.free_symbols
def check_sincos(expr, x):
if type(expr) == sin or type(expr) == cos:
a = Wild('a', properties=[lambda k: x not in k.free_symbols, ])
b = Wild('b', properties=[lambda k: x not in k.free_symbols or k == S.Zero, ])
sincos_args = expr.args[0]
if sincos_args.match(a * x + b) is not None:
return True
else:
return False
expr = sincos_to_sum(TR2(TR1(f)))
res_expr = S.Zero
add_coeff = expr.as_coeff_add()
res_expr += add_coeff[0]
for s in add_coeff[1]:
mul_coeffs = s.as_coeff_mul()[1]
for t in mul_coeffs:
if not (check_fx(t, x) or check_sincos(t, x)):
return False, f
res_expr += TR10(s)
a = Wild('a', properties=[lambda k:k.is_Integer, lambda k:k != 0, ])
return True, res_expr.collect([sin(a*x), cos(a*x)])
|
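finite_check decides whether an expression is a finite trigonometric sum by testing, factor by factor, that every sin/cos argument is linear in x with x-free coefficients; the test is a SymPy Wild match against a*x + b with 'properties' constraining what the wildcards may capture. A standalone sketch of that matching idiom (assuming only sympy; the helper name is hypothetical):

from sympy import Wild, sin, symbols

x = symbols("x")

# 'a' and 'b' may only match sub-expressions that do not contain x.
a = Wild("a", properties=[lambda k: x not in k.free_symbols])
b = Wild("b", properties=[lambda k: x not in k.free_symbols])

def is_linear_in_x(trig_expr):
    """True if the sin/cos argument has the form a*x + b with x-free a, b."""
    return trig_expr.args[0].match(a * x + b) is not None

print(is_linear_in_x(sin(3 * x + 2)))  # True  (a=3, b=2)
print(is_linear_in_x(sin(x**2)))       # False (argument is not linear in x)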
43,670 |
def _proc_wires(wires, n_wires=None):
r"""
Checks and processes custom user wire mapping into a consistent, direction-free, Wires format.
Used for converting between OpenFermion qubit numbering and Pennylane wire labels.
    Since OpenFermion's qubit numbering is always consecutive int, simple iterable types such as
list, tuple, or Wires can be used to specify the qubit<->wire mapping with indices acting as
qubits. Dict can also be used as a mapping, but does not provide any advantage over lists other
than the ability to do partial mapping/permutation in the qubit->wire direction.
    It is recommended to pass Wires/list/tuple `wires` since it's direction-free, i.e. the same `wires`
argument can be used to convert both ways between OpenFermion and Pennylane. Only use dict for
partial or unordered mapping.
**Example usage:**
>>> # consec int wires if no wires mapping provided, ie. identity map: 0<->0, 1<->1, 2<->2
>>> _proc_wires(None, 3)
<Wires = [0, 1, 2]>
>>> # List as mapping, qubit indices with wire label values: 0<->w0, 1<->w1, 2<->w2
>>> _proc_wires(['w0','w1','w2'])
<Wires = ['w0', 'w1', 'w2']>
>>> # Wires as mapping, qubit indices with wire label values: 0<->w0, 1<->w1, 2<->w2
>>> _proc_wires(Wires(['w0', 'w1', 'w2']))
<Wires = ['w0', 'w1', 'w2']>
>>> # Dict as partial mapping, int qubits keys to wire label values: 0->w0, 1 unchanged, 2->w2
>>> _proc_wires({0:'w0',2:'w2'})
<Wires = ['w0', 1, 'w2']>
>>> # Dict as mapping, wires label keys to consec int qubit values: w2->2, w0->0, w1->1
>>> _proc_wires({'w2':2, 'w0':0, 'w1':1})
<Wires = ['w0', 'w1', 'w2']>
Args:
wires (Wires, list, tuple, dict): User wire labels or mapping for Pennylane ansatz.
For types Wires, list, or tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) or
consecutive-int-valued dict (for wire-to-qubit conversion) is accepted.
If None, will be set to consecutive int based on ``n_wires``.
n_wires (int): Number of wires used if known. If None, will infer from ``wires``; if
``wires`` is not available, will be set to 1. Defaults to None.
Returns:
Wires: Cleaned wire mapping with indices corresponding to qubits and values
corresponding to wire labels.
"""
# infer from wires, or assume 1 if wires is not of accepted types.
if n_wires is None:
n_wires = len(wires) if isinstance(wires, (Wires, list, tuple, dict)) else 1
# defaults to no mapping.
if wires is None:
return Wires(range(n_wires))
if isinstance(wires, (Wires, list, tuple)):
# does not care about the tail if more wires are provided than n_wires.
wires = Wires(wires[:n_wires])
elif isinstance(wires, dict):
if all([isinstance(w, int) for w in wires.keys()]):
# Assuming keys are taken from consecutive int wires. Allows for partial mapping.
n_wires = max(wires) + 1
labels = list(range(n_wires)) # used for completing potential partial mapping.
for k, v in wires.items():
if k < n_wires:
labels[k] = v
wires = Wires(labels)
elif set(range(n_wires)).issubset(set(wires.values())):
# Assuming values are consecutive int wires (up to n_wires, ignores the rest).
# Does NOT allow for partial mapping.
wires = {v: k for k, v in wires.items()} # flip for easy indexing
wires = Wires([wires[i] for i in range(n_wires)])
else:
raise ValueError("Expected only int-keyed or consecutive int-valued dict for `wires`")
else:
raise ValueError(
"Expected type Wires, list, tuple, or dict for `wires`, got {}".format(type(wires))
)
if len(wires) != n_wires:
# check length consistency when all checking and cleaning are done.
raise ValueError(
"Length of `wires` ({}) does not match `n_wires` ({})".format(len(wires), n_wires)
)
return wires
|
def _proc_wires(wires, n_wires=None):
r"""
Checks and processes custom user wire mapping into a consistent, direction-free, Wires format.
Used for converting between OpenFermion qubit numbering and Pennylane wire labels.
    Since OpenFermion's qubit numbering is always consecutive int, simple iterable types such as
list, tuple, or Wires can be used to specify the qubit<->wire mapping with indices acting as
qubits. Dict can also be used as a mapping, but does not provide any advantage over lists other
than the ability to do partial mapping/permutation in the qubit->wire direction.
    It is recommended to pass Wires/list/tuple `wires` since it's direction-free, i.e. the same `wires`
argument can be used to convert both ways between OpenFermion and Pennylane. Only use dict for
partial or unordered mapping.
**Example usage:**
>>> # consec int wires if no wires mapping provided, ie. identity map: 0<->0, 1<->1, 2<->2
>>> _proc_wires(None, 3)
<Wires = [0, 1, 2]>
>>> # List as mapping, qubit indices with wire label values: 0<->w0, 1<->w1, 2<->w2
>>> _proc_wires(['w0','w1','w2'])
<Wires = ['w0', 'w1', 'w2']>
>>> # Wires as mapping, qubit indices with wire label values: 0<->w0, 1<->w1, 2<->w2
>>> _proc_wires(Wires(['w0', 'w1', 'w2']))
<Wires = ['w0', 'w1', 'w2']>
>>> # Dict as partial mapping, int qubits keys to wire label values: 0->w0, 1 unchanged, 2->w2
>>> _proc_wires({0:'w0',2:'w2'})
<Wires = ['w0', 1, 'w2']>
>>> # Dict as mapping, wires label keys to consec int qubit values: w2->2, w0->0, w1->1
>>> _proc_wires({'w2':2, 'w0':0, 'w1':1})
<Wires = ['w0', 'w1', 'w2']>
Args:
wires (Wires, list, tuple, dict): User wire labels or mapping for Pennylane ansatz.
For types Wires, list, or tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) or
consecutive-int-valued dict (for wire-to-qubit conversion) is accepted.
If None, will be set to consecutive int based on ``n_wires``.
n_wires (int): Number of wires used if known. If None, will be inferred from ``wires``; if
``wires`` is not available, will be set to 1. Defaults to None.
Returns:
Wires: Cleaned wire mapping with indices corresponding to qubits and values
corresponding to wire labels.
"""
# infer from wires, or assume 1 if wires is not of accepted types.
if n_wires is None:
n_wires = len(wires) if isinstance(wires, (Wires, list, tuple, dict)) else 1
# defaults to no mapping.
if wires is None:
return Wires(range(n_wires))
if isinstance(wires, (Wires, list, tuple)):
# does not care about the tail if more wires are provided than n_wires.
wires = Wires(wires[:n_wires])
elif isinstance(wires, dict):
if all([isinstance(w, int) for w in wires.keys()]):
# Assuming keys are taken from consecutive int wires. Allows for partial mapping.
n_wires = max(wires) + 1
labels = list(range(n_wires)) # used for completing potential partial mapping.
for k, v in wires.items():
if k < n_wires:
labels[k] = v
wires = Wires(labels)
elif set(range(n_wires)).issubset(set(wires.values())):
# Assuming values are consecutive int wires (up to n_wires, ignores the rest).
# Does NOT allow for partial mapping.
wires = {v: k for k, v in wires.items()} # flip for easy indexing
wires = Wires([wires[i] for i in range(n_wires)])
else:
raise ValueError("Expected only int-keyed or consecutive int-valued dict for `wires`")
else:
raise ValueError(
"Expected type Wires, list, tuple, or dict for `wires`, got {}".format(type(wires))
)
if len(wires) != n_wires:
# check length consistency when all checking and cleaning are done.
raise ValueError(
"Length of `wires` ({}) does not match `n_wires` ({})".format(len(wires), n_wires)
)
return wires
|
49,010 |
def build_component_list(compdict, custom=None, convert=update_classpath):
"""Compose a component list from a { class: order } dictionary."""
def _check_components(complist):
if len({convert(c) for c in complist}) != len(complist):
            raise ValueError(f'Some paths in {complist!r} convert to the same object, '
'please update your settings')
def _map_keys(compdict):
if isinstance(compdict, BaseSettings):
compbs = BaseSettings()
for k, v in compdict.items():
prio = compdict.getpriority(k)
if compbs.getpriority(convert(k)) == prio:
raise ValueError(f'Some paths in {list(compdict.keys())!r} '
'convert to the same '
'object, please update your settings'
)
else:
compbs.set(convert(k), v, priority=prio)
return compbs
else:
_check_components(compdict)
return {convert(k): v for k, v in compdict.items()}
def _validate_values(compdict):
"""Fail if a value in the components dict is not a real number or None."""
for name, value in compdict.items():
if value is not None and not isinstance(value, numbers.Real):
raise ValueError(f'Invalid value {value} for component {name}, '
'please provide a real number or None instead'
)
# BEGIN Backward compatibility for old (base, custom) call signature
if isinstance(custom, (list, tuple)):
_check_components(custom)
return type(custom)(convert(c) for c in custom)
if custom is not None:
compdict.update(custom)
# END Backward compatibility
_validate_values(compdict)
compdict = without_none_values(_map_keys(compdict))
return [k for k, v in sorted(compdict.items(), key=itemgetter(1))]
|
def build_component_list(compdict, custom=None, convert=update_classpath):
"""Compose a component list from a { class: order } dictionary."""
def _check_components(complist):
if len({convert(c) for c in complist}) != len(complist):
            raise ValueError(f'Some paths in {complist!r} convert to the same object, '
'please update your settings')
def _map_keys(compdict):
if isinstance(compdict, BaseSettings):
compbs = BaseSettings()
for k, v in compdict.items():
prio = compdict.getpriority(k)
if compbs.getpriority(convert(k)) == prio:
raise ValueError(f'Some paths in {list(compdict.keys())!r} '
'convert to the same '
'object, please update your settings'
)
else:
compbs.set(convert(k), v, priority=prio)
return compbs
else:
_check_components(compdict)
return {convert(k): v for k, v in compdict.items()}
def _validate_values(compdict):
"""Fail if a value in the components dict is not a real number or None."""
for name, value in compdict.items():
if value is not None and not isinstance(value, numbers.Real):
raise ValueError(f'Invalid value {value} for component {name}, '
'please provide a real number or None instead')
# BEGIN Backward compatibility for old (base, custom) call signature
if isinstance(custom, (list, tuple)):
_check_components(custom)
return type(custom)(convert(c) for c in custom)
if custom is not None:
compdict.update(custom)
# END Backward compatibility
_validate_values(compdict)
compdict = without_none_values(_map_keys(compdict))
return [k for k, v in sorted(compdict.items(), key=itemgetter(1))]
|
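After validation, build_component_list reduces to: drop components whose order is None, then sort the remaining class paths by their numeric order. A tiny sketch of that final step with a hypothetical settings dict (the middleware paths are made up):

from operator import itemgetter

compdict = {
    "myproject.middlewares.AuthMiddleware": 100,
    "myproject.middlewares.RetryMiddleware": 550,
    "myproject.middlewares.DisabledMiddleware": None,  # None disables a component
    "myproject.middlewares.CacheMiddleware": 300,
}

# Drop disabled entries, then order the rest by their numeric value.
enabled = {k: v for k, v in compdict.items() if v is not None}
component_list = [k for k, _ in sorted(enabled.items(), key=itemgetter(1))]
print(component_list)
# ['myproject.middlewares.AuthMiddleware',
#  'myproject.middlewares.CacheMiddleware',
#  'myproject.middlewares.RetryMiddleware']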
57,648 |
def splunk_job_status(service):
job = service.job(demisto.args()['sid'])
status = job.state.content['dispatchState']
entry_context = {
'SID': demisto.args()['sid'],
'Status': status
}
context = {'Splunk.JobStatus(val.ID && val.ID === obj.ID)': entry_context}
human_readable = tableToMarkdown('Splunk Job Status', entry_context)
demisto.results({
"Type": 1,
"Contents": 'Splunk Job Status',
"ContentsFormat": "json",
"EntryContext": context,
"HumanReadable": human_readable
})
|
def splunk_job_status(service):
job_sid = demisto.args().get('sid')
job = service.job(job_sid)
status = job.state.content.get('dispatchState')
entry_context = {
'SID': job_sid,
'Status': status
}
context = {'Splunk.JobStatus(val.ID && val.ID === obj.ID)': entry_context}
human_readable = tableToMarkdown('Splunk Job Status', entry_context)
demisto.results({
"Type": 1,
"Contents": 'Splunk Job Status',
"ContentsFormat": "json",
"EntryContext": context,
"HumanReadable": human_readable
})
|
35,354 |
def convert_script(
filename_in,
filename_out,
loglevel="WARNING",
auto_exit=True,
line_ending=None,
exec_file=None,
macros_as_functions=True,
use_function_names=True,
show_log = False,
add_imports = True,
comment_solve = False,
format_output = True,
header = True
):
"""Converts an ANSYS input file to a python PyMAPDL script.
Parameters
----------
filename_in : str
Filename of the ansys input file to read in.
filename_out : str
Filename of the python script to write a translation to.
loglevel : str, optional
Logging level of the ansys object within the script.
auto_exit : bool, optional
Adds a line to the end of the script to exit MAPDL. Default
``True``.
line_ending : str, optional
When None, automatically determined by OS being used.
macros_as_functions : bool, optional
Attempt to convert MAPDL macros to python functions.
use_function_names : bool, optional
Convert MAPDL functions to ansys.mapdl.core.Mapdl class
methods. When ``True``, the MAPDL command "K" will be
converted to ``mapdl.k``. When ``False``, it will be
converted to ``mapdl.run('k')``.
show_log : bool, optional
Print the converted commands using a logger (from ``logging``
Python module).
add_imports : bool, optional
        If ``True``, it adds the lines ``from ansys.mapdl.core import launch_mapdl``
        and ``mapdl = launch_mapdl(loglevel="WARNING")`` to the beginning of the
output file.
This option is useful if you are planning to use the output
script from another mapdl session. See examples section.
This option overrides ``'auto_exit'``.
comment_solve : bool, optional
        If ``True``, it will pythonically comment out the lines which
        contain ``mapdl.solve`` or ``/EOF``.
format_output : bool, optional
If ``True`` the output is formatted using ``autopep8`` before
writing the file or returning the string.
header : bool, optional
If ``True``, the default header is written in the first line
of the output. If a string is provided, this string will be
used as header.
Returns
-------
list
List of lines translated.
Examples
--------
>>> from ansys.mapdl import core as pymapdl
>>> from ansys.mapdl.core import examples
>>> clines = pymapdl.convert_script(examples.vmfiles['vm1'], 'vm1.py')
Converting a script and using it already in the same session.
For this case, it is recommended to use ``convert_apdl_block``
    from the ``converter`` module since you do not have to write the file.
>>> from ansys.mapdl.core import launch_mapdl
>>> from ansys.mapdl.core import examples
>>> from ansys.mapdl.core import convert_script
>>> in_file = examples.vmfiles['vm10']
>>> filename = in_file.split('\\')[-1]
>>> out_file = 'out_' + filename.replace('.dat', '.py')
    >>> output = convert_script(in_file, out_file, line_ending='\\n')
>>> mapdl = launch_mapdl()
>>> with open(out_file, 'r') as fid:
... cmds = fid.read()
>>> mapdl.input_strings(cmds.splitlines()[2:10])
"""
with open(filename_in, 'r') as fid:
apdl_strings = fid.readlines()
translator = _convert(apdl_strings=apdl_strings,
loglevel=loglevel,
auto_exit=auto_exit,
line_ending=line_ending,
exec_file=exec_file,
macros_as_functions=macros_as_functions,
use_function_names=use_function_names,
show_log=show_log,
add_imports = add_imports,
comment_solve = comment_solve,
format_output = format_output,
header = header
)
translator.save(filename_out)
return translator.lines
|
def convert_script(
filename_in,
filename_out,
loglevel="WARNING",
auto_exit=True,
line_ending=None,
exec_file=None,
macros_as_functions=True,
use_function_names=True,
show_log = False,
add_imports = True,
comment_solve = False,
format_output = True,
header = True
):
"""Converts an ANSYS input file to a python PyMAPDL script.
Parameters
----------
filename_in : str
Filename of the ansys input file to read in.
filename_out : str
Filename of the python script to write a translation to.
loglevel : str, optional
Logging level of the ansys object within the script.
auto_exit : bool, optional
Adds a line to the end of the script to exit MAPDL. Default
``True``.
line_ending : str, optional
When None, automatically determined by OS being used.
macros_as_functions : bool, optional
Attempt to convert MAPDL macros to python functions.
use_function_names : bool, optional
Convert MAPDL functions to ansys.mapdl.core.Mapdl class
methods. When ``True``, the MAPDL command "K" will be
converted to ``mapdl.k``. When ``False``, it will be
converted to ``mapdl.run('k')``.
show_log : bool, optional
Print the converted commands using a logger (from ``logging``
Python module).
add_imports : bool, optional
        If ``True``, it adds the lines ``from ansys.mapdl.core import launch_mapdl``
        and ``mapdl = launch_mapdl(loglevel="WARNING")`` to the beginning of the
output file.
This option is useful if you are planning to use the output
script from another mapdl session. See examples section.
This option overrides ``'auto_exit'``.
comment_solve : bool, optional
        If ``True``, it will pythonically comment out the lines which
        contain ``mapdl.solve`` or ``/EOF``.
format_output : bool, optional
If ``True`` the output is formatted using ``autopep8`` before
writing the file or returning the string.
header : bool, optional
If ``True``, the default header is written in the first line
of the output. If a string is provided, this string will be
used as header.
Returns
-------
list
List of lines translated.
Examples
--------
>>> from ansys.mapdl import core as pymapdl
>>> from ansys.mapdl.core import examples
>>> clines = pymapdl.convert_script(examples.vmfiles['vm1'], 'vm1.py')
Converting a script and using it already in the same session.
For this case, it is recommended to use ``convert_apdl_block``
    from the ``converter`` module since you do not have to write the file.
>>> from ansys.mapdl.core import launch_mapdl
>>> from ansys.mapdl.core import examples
>>> from ansys.mapdl.core import convert_script
>>> in_file = examples.vmfiles['vm10']
>>> filename = in_file.split('\\')[-1]
>>> out_file = 'out_' + filename.replace('.dat', '.py')
    >>> output = convert_script(in_file, out_file, line_ending='\\n')
>>> mapdl = launch_mapdl()
>>> with open(out_file, 'r') as fid:
... cmds = fid.read()
>>> mapdl.input_strings(cmds.splitlines()[2:10])
"""
with open(filename_in, 'r') as fid:
apdl_strings = fid.readlines()
translator = _convert(apdl_strings=apdl_strings,
loglevel=loglevel,
auto_exit=auto_exit,
line_ending=line_ending,
exec_file=exec_file,
macros_as_functions=macros_as_functions,
use_function_names=use_function_names,
show_log=show_log,
add_imports = add_imports,
comment_solve = comment_solve,
format_output = format_output,
header = header
)
translator.save(filename_out)
return translator.lines
|
31,040 |
def main():
""" Main Function"""
try:
LOG('Command is %s' % (demisto.command(),))
global ACCESS_TOKEN, REFRESH_TOKEN
ACCESS_TOKEN = demisto.params().get('access_token')
REFRESH_TOKEN = demisto.params().get('refresh_token')
if demisto.command() == 'get-dlp-report':
report_id = demisto.args().get('report_id')
fetch_snippets = demisto.args().get('fetch_snippets', 'false') == 'true'
report_json, status_code = get_dlp_report(report_id, fetch_snippets)
parse_dlp_report(report_json)
if demisto.command() == "test-module":
test()
except Exception as e:
demisto.debug('Unknown Command')
error_message = str(e)
return_error(error_message)
finally:
LOG.print_log()
|
def main():
""" Main Function"""
try:
LOG('Command is %s' % (demisto.command(),))
global ACCESS_TOKEN, REFRESH_TOKEN
ACCESS_TOKEN = demisto.params().get('access_token')
REFRESH_TOKEN = params.get('refresh_token')
if demisto.command() == 'get-dlp-report':
report_id = demisto.args().get('report_id')
fetch_snippets = demisto.args().get('fetch_snippets', 'false') == 'true'
report_json, status_code = get_dlp_report(report_id, fetch_snippets)
parse_dlp_report(report_json)
if demisto.command() == "test-module":
test()
except Exception as e:
demisto.debug('Unknown Command')
error_message = str(e)
return_error(error_message)
finally:
LOG.print_log()
|
27,920 |
def generate_array(initializer, shape, xp, dtype=None, device=None):
# type: (types.AbstractInitializer, types.ShapeSpec, types.Xp, types.DTypeSpec, types.DeviceSpec) -> types.NdArray # NOQA
"""Return initialized array.
The algorithms used to make the new values depend on the
concrete derived classes. If the initializer has the ``dtype`` attribute,
it is used to construct the array. Otherwise, ``chainer.config.dtype`` is
used instead. See :ref:`configuration` for the dtype config.
Args:
initializer: A callable object that takes :ref:`ndarray` and edits its
value.
shape (int or tuple of int): Shape of a return array.
xp (module): :mod:`cupy`, :mod:`numpy`, or :mod:`chainerx`.
dtype: Dtype specifier. If omitted, ``initializer.dtype`` is used.
device: Target device specifier. If omitted, the current device is
used for :mod:`cupy`, and the default device is used for
:mod:`chainerx`.
Returns:
:ref:`ndarray`: An initialized array.
"""
dtype_attr = getattr(initializer, 'dtype', None)
if dtype is not None and dtype_attr is not None \
and numpy.dtype(dtype) != numpy.dtype(dtype_attr):
raise ValueError(
'dtype mismatch: {} != {}'.format(dtype, dtype_attr))
if dtype is None:
dtype = dtype_attr
dtype = chainer.get_dtype(dtype)
if device is None:
backend_device = backend._guess_device_from_array_module(xp)
else:
backend_device = chainer.get_device(device)
if xp != backend_device.xp:
raise ValueError('xp and device arguments are inconsistent.')
if xp is chainerx:
# Initialize with NumPy/CuPy array that shares memory with the
# ChainerX array.
# TODO(sonots): Directly use initializer after ChainerX
# supports random.
chx_device = backend_device.device
array = chainerx.empty(shape, dtype=dtype, device=chx_device)
fallback_device = backend_device.fallback_device
with chainer.using_device(fallback_device):
initializer(fallback_device.send(array))
return array
with chainer.using_device(backend_device):
array = xp.empty(shape, dtype=dtype)
initializer(array)
return array
|
def generate_array(initializer, shape, xp, dtype=None, device=None):
# type: (types.AbstractInitializer, types.ShapeSpec, types.Xp, types.DTypeSpec, types.DeviceSpec) -> types.NdArray # NOQA
"""Return initialized array.
The algorithms used to make the new values depend on the
concrete derived classes. If the initializer has the ``dtype`` attribute,
it is used to construct the array. Otherwise, ``chainer.config.dtype`` is
used instead. See :ref:`configuration` for the dtype config.
Args:
initializer: A callable object that takes :ref:`ndarray` and edits its
value.
shape (int or tuple of int): Shape of an initialized array.
xp (module): :mod:`cupy`, :mod:`numpy`, or :mod:`chainerx`.
dtype: Dtype specifier. If omitted, ``initializer.dtype`` is used.
device: Target device specifier. If omitted, the current device is
used for :mod:`cupy`, and the default device is used for
:mod:`chainerx`.
Returns:
:ref:`ndarray`: An initialized array.
"""
dtype_attr = getattr(initializer, 'dtype', None)
if dtype is not None and dtype_attr is not None \
and numpy.dtype(dtype) != numpy.dtype(dtype_attr):
raise ValueError(
'dtype mismatch: {} != {}'.format(dtype, dtype_attr))
if dtype is None:
dtype = dtype_attr
dtype = chainer.get_dtype(dtype)
if device is None:
backend_device = backend._guess_device_from_array_module(xp)
else:
backend_device = chainer.get_device(device)
if xp != backend_device.xp:
raise ValueError('xp and device arguments are inconsistent.')
if xp is chainerx:
# Initialize with NumPy/CuPy array that shares memory with the
# ChainerX array.
# TODO(sonots): Directly use initializer after ChainerX
# supports random.
chx_device = backend_device.device
array = chainerx.empty(shape, dtype=dtype, device=chx_device)
fallback_device = backend_device.fallback_device
with chainer.using_device(fallback_device):
initializer(fallback_device.send(array))
return array
with chainer.using_device(backend_device):
array = xp.empty(shape, dtype=dtype)
initializer(array)
return array
|
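The dtype handling in generate_array follows one rule: an explicit dtype argument and the initializer's own dtype attribute must agree, otherwise whichever one is present wins. A NumPy-only sketch of that rule with a hypothetical constant-fill initializer, leaving out Chainer's device logic entirely:

import numpy as np

class ConstantInit:
    """Hypothetical initializer: fills the array with a constant value."""
    dtype = np.float32

    def __init__(self, value):
        self.value = value

    def __call__(self, array):
        array[...] = self.value

def generate(initializer, shape, dtype=None):
    dtype_attr = getattr(initializer, "dtype", None)
    if dtype is not None and dtype_attr is not None \
            and np.dtype(dtype) != np.dtype(dtype_attr):
        raise ValueError(f"dtype mismatch: {dtype} != {dtype_attr}")
    dtype = dtype if dtype is not None else (dtype_attr or np.float64)
    array = np.empty(shape, dtype=dtype)
    initializer(array)
    return array

arr = generate(ConstantInit(0.5), (2, 3))
print(arr.dtype, arr[0, 0])  # float32 0.5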
33,522 |
def handle_create_connection(data: Dict, headers: Dict):
name = data.get("Name")
auth_type = data.get("AuthorizationType")
errors = []
pattern = re.compile("^[\\.\\-_A-Za-z0-9]+$")
if not pattern.match(name):
error = (
"%s at 'name' failed to satisfy: Member must satisfy regular expression pattern: [\\.\\-_A-Za-z0-9]+"
% name
)
errors.append(error)
if len(name) >= 65:
error = (
"%s at 'name' failed to satisfy: Member must have length less than or equal to 64"
% name
)
errors.append(error)
if auth_type not in ["BASIC", "API_KEY", "OAUTH_CLIENT_CREDENTIALS"]:
error = (
"%s at 'authorizationType' failed to satisfy: Member must satisfy enum value set: [BASIC, OAUTH_CLIENT_CREDENTIALS, API_KEY]"
% name
)
errors.append(error)
if len(errors) > 0:
error_description = "; ".join(errors)
message = "%s validation %s detected: %s" % (
len(errors),
"errors" if len(errors) > 1 else "error",
error_description,
)
return requests_error_response(headers, message, 400, "ValidationException")
return True
|
def handle_create_connection(data: Dict, headers: Dict):
name = data.get("Name")
auth_type = data.get("AuthorizationType")
errors = []
pattern = re.compile("^[\\.\\-_A-Za-z0-9]+$")
if not pattern.match(name):
error = (
"%s at 'name' failed to satisfy: Member must satisfy regular expression pattern: [\\.\\-_A-Za-z0-9]+"
% name
)
errors.append(error)
if len(name) > 64:
error = (
"%s at 'name' failed to satisfy: Member must have length less than or equal to 64"
% name
)
errors.append(error)
if auth_type not in ["BASIC", "API_KEY", "OAUTH_CLIENT_CREDENTIALS"]:
error = (
"%s at 'authorizationType' failed to satisfy: Member must satisfy enum value set: [BASIC, OAUTH_CLIENT_CREDENTIALS, API_KEY]"
% name
)
errors.append(error)
if len(errors) > 0:
error_description = "; ".join(errors)
message = "%s validation %s detected: %s" % (
len(errors),
"errors" if len(errors) > 1 else "error",
error_description,
)
return requests_error_response(headers, message, 400, "ValidationException")
return True
|
34,954 |
def derive_similarity_tag(dag, log_base=1.618):
"""Derive the tag for similarity check from one computational DAG.
The DAGs with the same tag are considered as similar tasks.
Parameters
----------
dag: ComputeDAG
The input computational DAG
log_base: float = 1.618
The base of log to normalize FLOPS
Returns
-------
tag: str
The tag of this computational DAG.
"""
ret = ""
for op in dag.ops:
tag = op.attrs.get("ansor_task_scheduler_tag", None)
if tag:
ret += op.attrs["ansor_task_scheduler_tag"] + "_"
if ret != "":
ret += "%d" % int(math.log(dag.flop_ct + 1, log_base))
return ret
|
def derive_similarity_tag(dag, log_base=1.618):
"""Derive the tag for similarity check from one computational DAG.
The DAGs with the same tag are considered as similar tasks.
Parameters
----------
dag: ComputeDAG
The input computational DAG
log_base: float = 1.618
The base of log to normalize FLOPS
Returns
-------
tag: str
The tag of this computational DAG. The tag format is <op1-tag>_<op2-tag>...,_<log(flop)>.
"""
ret = ""
for op in dag.ops:
tag = op.attrs.get("ansor_task_scheduler_tag", None)
if tag:
ret += op.attrs["ansor_task_scheduler_tag"] + "_"
if ret != "":
ret += "%d" % int(math.log(dag.flop_ct + 1, log_base))
return ret
|
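The numeric suffix of the tag is a log-bucketed FLOP count: DAGs whose costs differ by less than roughly a factor of log_base fall into the same or neighbouring buckets, so they can be treated as similar tasks. A short worked sketch of that bucketing with made-up FLOP counts:

import math

def flop_bucket(flop_ct, log_base=1.618):
    """Bucket index used as the numeric suffix of the similarity tag."""
    return int(math.log(flop_ct + 1, log_base))

for flops in (1_000, 1_500, 2_000, 1_000_000):
    print(flops, "->", flop_bucket(flops))
# Nearby FLOP counts land in the same or adjacent buckets, while a DAG that
# is 1000x larger gets a clearly different suffix and is never grouped
# with the small ones.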
41,181 |
def integrated_histogram(
data: Union[Sequence[SupportsFloat], Mapping[Any, SupportsFloat]],
ax: Optional[plt.Axes] = None,
*,
cdf_on_x: bool = False,
axis_label: str = '',
semilog: bool = True,
median_line: bool = True,
median_label: Optional[str] = 'median',
mean_line: bool = False,
mean_label: Optional[str] = 'mean',
hide_zero: bool = True,
title: Optional[str] = None,
**kwargs,
) -> plt.Axes:
"""Plot the integrated histogram for an array of data.
Suppose the input is a list of gate fidelities. The x-axis of the plot will
be gate fidelity, and the y-axis will be the probability that a random gate
fidelity from the list is less than the x-value. It will look something like
this
1.0
| |
| ___|
| |
| ____|
| |
| |
|_____|_______________
0.0
Another way of saying this is that we assume the probability distribution
function (pdf) of gate fidelities is a set of equally weighted delta
functions at each value in the list. Then, the "integrated histogram"
is the cumulative distribution function (cdf) for this pdf.
Args:
data: Data to histogram. If the data is a mapping, we histogram the
values. All nans will be removed.
ax: The axis to plot on. If None, we generate one.
        cdf_on_x: If True, flip the axes compared to the above example.
axis_label: Label for x axis.
semilog: If True, force the x-axis to be logarithmic.
median_line: If True, draw a vertical line on the median value.
median_label: If drawing median line, optional label for it.
mean_line: If True, draw a vertical line on the mean value.
mean_label: If drawing mean line, optional label for it.
**plot_options: Kwargs to forward to `ax.step()`. Some examples are
color: Color of the line.
linestyle: Linestyle to use for the plot.
lw: linewidth for integrated histogram
ms: marker size for a histogram trace
label: An optional label which can be used in a legend.
Returns:
The axis that was plotted on.
"""
show_plot = not ax
if ax is None:
fig, ax = plt.subplots(1, 1)
if isinstance(data, Mapping):
data = list(data.values())
data = [d for d in data if not np.isnan(d)]
n = len(data)
if not hide_zero:
bin_values = np.linspace(0, 1, n + 1)
parameter_values = sorted(np.concatenate(([0], data)))
else:
bin_values = np.linspace(0, 1, n)
parameter_values = sorted(data)
plot_options = {
"where": 'post',
"color": 'b',
"linestyle": '-',
"lw": 1.0,
"ms": 0.0,
}
plot_options.update(kwargs)
if cdf_on_x:
ax.step(bin_values, parameter_values, **plot_options)
setsemilog = ax.semilogy
setlim = ax.set_xlim
setticks = ax.set_xticks
setline = ax.axhline
cdflabel = ax.set_xlabel
axlabel = ax.set_ylabel
else:
ax.step(parameter_values, bin_values, **plot_options)
setsemilog = ax.semilogx
setlim = ax.set_ylim
setticks = ax.set_yticks
setline = ax.axvline
cdflabel = ax.set_ylabel
axlabel = ax.set_xlabel
if not title:
title = f'N={n}'
ax.set_title(title)
if semilog:
setsemilog()
setlim(0, 1)
setticks([0.0, 0.25, 0.5, 0.75, 1.0])
ax.grid(True)
cdflabel('Integrated histogram')
if axis_label:
axlabel(axis_label)
if 'label' in plot_options:
ax.legend()
if median_line:
setline(
np.median(data),
linestyle='--',
color=plot_options['color'],
alpha=0.5,
label=median_label,
)
if mean_line:
setline(
np.mean(data), linestyle='-.', color=plot_options['color'], alpha=0.5, label=mean_label
)
if show_plot:
fig.show()
return ax
|
def integrated_histogram(
data: Union[Sequence[SupportsFloat], Mapping[Any, SupportsFloat]],
ax: Optional[plt.Axes] = None,
*,
cdf_on_x: bool = False,
axis_label: str = '',
semilog: bool = True,
median_line: bool = True,
median_label: Optional[str] = 'median',
mean_line: bool = False,
mean_label: Optional[str] = 'mean',
hide_zero: bool = True,
title: Optional[str] = None,
**kwargs,
) -> plt.Axes:
"""Plot the integrated histogram for an array of data.
Suppose the input is a list of gate fidelities. The x-axis of the plot will
be gate fidelity, and the y-axis will be the probability that a random gate
fidelity from the list is less than the x-value. It will look something like
this
1.0
| |
| ___|
| |
| ____|
| |
| |
|_____|_______________
0.0
Another way of saying this is that we assume the probability distribution
function (pdf) of gate fidelities is a set of equally weighted delta
functions at each value in the list. Then, the "integrated histogram"
is the cumulative distribution function (cdf) for this pdf.
Args:
data: Data to histogram. If the data is a `Mapping`, we histogram the
values. All nans will be removed.
ax: The axis to plot on. If None, we generate one.
        cdf_on_x: If True, flip the axes compared to the above example.
axis_label: Label for x axis.
semilog: If True, force the x-axis to be logarithmic.
median_line: If True, draw a vertical line on the median value.
median_label: If drawing median line, optional label for it.
mean_line: If True, draw a vertical line on the mean value.
mean_label: If drawing mean line, optional label for it.
**plot_options: Kwargs to forward to `ax.step()`. Some examples are
color: Color of the line.
linestyle: Linestyle to use for the plot.
lw: linewidth for integrated histogram
ms: marker size for a histogram trace
label: An optional label which can be used in a legend.
Returns:
The axis that was plotted on.
"""
show_plot = not ax
if ax is None:
fig, ax = plt.subplots(1, 1)
if isinstance(data, Mapping):
data = list(data.values())
data = [d for d in data if not np.isnan(d)]
n = len(data)
if not hide_zero:
bin_values = np.linspace(0, 1, n + 1)
parameter_values = sorted(np.concatenate(([0], data)))
else:
bin_values = np.linspace(0, 1, n)
parameter_values = sorted(data)
plot_options = {
"where": 'post',
"color": 'b',
"linestyle": '-',
"lw": 1.0,
"ms": 0.0,
}
plot_options.update(kwargs)
if cdf_on_x:
ax.step(bin_values, parameter_values, **plot_options)
setsemilog = ax.semilogy
setlim = ax.set_xlim
setticks = ax.set_xticks
setline = ax.axhline
cdflabel = ax.set_xlabel
axlabel = ax.set_ylabel
else:
ax.step(parameter_values, bin_values, **plot_options)
setsemilog = ax.semilogx
setlim = ax.set_ylim
setticks = ax.set_yticks
setline = ax.axvline
cdflabel = ax.set_ylabel
axlabel = ax.set_xlabel
if not title:
title = f'N={n}'
ax.set_title(title)
if semilog:
setsemilog()
setlim(0, 1)
setticks([0.0, 0.25, 0.5, 0.75, 1.0])
ax.grid(True)
cdflabel('Integrated histogram')
if axis_label:
axlabel(axis_label)
if 'label' in plot_options:
ax.legend()
if median_line:
setline(
np.median(data),
linestyle='--',
color=plot_options['color'],
alpha=0.5,
label=median_label,
)
if mean_line:
setline(
np.mean(data), linestyle='-.', color=plot_options['color'], alpha=0.5, label=mean_label
)
if show_plot:
fig.show()
return ax
|
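The "integrated histogram" plotted above is just the empirical CDF of the data: sort the values and step the cumulative fraction from 0 to 1. A matplotlib-free sketch of the values the step plot is built from, mirroring the hide_zero=True branch with made-up fidelities:

import numpy as np

data = np.array([0.91, 0.97, 0.85, 0.99, 0.93])

# Sorted parameter values on one axis, cumulative fraction on the other.
parameter_values = np.sort(data)
bin_values = np.linspace(0, 1, len(data))

for x, y in zip(parameter_values, bin_values):
    print(f"P(value <= {x:.2f}) ~= {y:.2f}")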
50,917 |
def _apply_update(engine, new_version, old_version):
"""Perform operations to bring schema up to date."""
if new_version == 1:
_create_index(engine, "events", "ix_events_time_fired")
elif new_version == 2:
# Create compound start/end index for recorder_runs
_create_index(engine, "recorder_runs", "ix_recorder_runs_start_end")
# Create indexes for states
_create_index(engine, "states", "ix_states_last_updated")
elif new_version == 3:
# There used to be a new index here, but it was removed in version 4.
pass
elif new_version == 4:
# Queries were rewritten in this schema release. Most indexes from
# earlier versions of the schema are no longer needed.
if old_version == 3:
# Remove index that was added in version 3
_drop_index(engine, "states", "ix_states_created_domain")
if old_version == 2:
# Remove index that was added in version 2
_drop_index(engine, "states", "ix_states_entity_id_created")
# Remove indexes that were added in version 0
_drop_index(engine, "states", "states__state_changes")
_drop_index(engine, "states", "states__significant_changes")
_drop_index(engine, "states", "ix_states_entity_id_created")
_create_index(engine, "states", "ix_states_entity_id_last_updated")
elif new_version == 5:
# Create supporting index for States.event_id foreign key
_create_index(engine, "states", "ix_states_event_id")
elif new_version == 6:
_add_columns(
engine,
"events",
["context_id CHARACTER(36)", "context_user_id CHARACTER(36)"],
)
_create_index(engine, "events", "ix_events_context_id")
_create_index(engine, "events", "ix_events_context_user_id")
_add_columns(
engine,
"states",
["context_id CHARACTER(36)", "context_user_id CHARACTER(36)"],
)
_create_index(engine, "states", "ix_states_context_id")
_create_index(engine, "states", "ix_states_context_user_id")
elif new_version == 7:
_create_index(engine, "states", "ix_states_entity_id")
elif new_version == 8:
_add_columns(engine, "events", ["context_parent_id CHARACTER(36)"])
_add_columns(engine, "states", ["old_state_id INTEGER"])
_create_index(engine, "events", "ix_events_context_parent_id")
elif new_version == 9:
# We now get the context from events with a join
# since its always there on state_changed events
#
# Ideally we would drop the columns from the states
# table as well but sqlite doesn't support that
# and we would have to move to something like
# sqlalchemy alembic to make that work
#
_drop_index(engine, "states", "ix_states_context_id")
_drop_index(engine, "states", "ix_states_context_user_id")
# This index won't be there if they were not running
# nightly but we don't treat that as a critical issue
_drop_index(engine, "states", "ix_states_context_parent_id")
# Redundant keys on composite index:
# We already have ix_states_entity_id_last_updated
_drop_index(engine, "states", "ix_states_entity_id")
_create_index(engine, "events", "ix_events_event_type_time_fired")
_drop_index(engine, "events", "ix_events_event_type")
elif new_version == 10:
# Now done in step 11
pass
elif new_version == 11:
_create_index(engine, "states", "ix_states_old_state_id")
_update_states_table_with_foreign_key_options(engine)
elif new_version == 12:
if engine.dialect.name == "mysql":
_modify_columns(engine, "events", ["event_data LONGTEXT"])
_modify_columns(engine, "states", ["attributes LONGTEXT"])
elif new_version == 13:
if engine.dialect.name == "mysql":
_modify_columns(engine, "events", ["time_fired DATETIME(6)"])
_modify_columns(engine, "events", ["created DATETIME(6)"])
_modify_columns(engine, "states", ["last_changed DATETIME(6)"])
_modify_columns(engine, "states", ["last_updated DATETIME(6)"])
_modify_columns(engine, "states", ["created DATETIME(6)"])
else:
raise ValueError(f"No schema migration defined for version {new_version}")
|
def _apply_update(engine, new_version, old_version):
"""Perform operations to bring schema up to date."""
if new_version == 1:
_create_index(engine, "events", "ix_events_time_fired")
elif new_version == 2:
# Create compound start/end index for recorder_runs
_create_index(engine, "recorder_runs", "ix_recorder_runs_start_end")
# Create indexes for states
_create_index(engine, "states", "ix_states_last_updated")
elif new_version == 3:
# There used to be a new index here, but it was removed in version 4.
pass
elif new_version == 4:
# Queries were rewritten in this schema release. Most indexes from
# earlier versions of the schema are no longer needed.
if old_version == 3:
# Remove index that was added in version 3
_drop_index(engine, "states", "ix_states_created_domain")
if old_version == 2:
# Remove index that was added in version 2
_drop_index(engine, "states", "ix_states_entity_id_created")
# Remove indexes that were added in version 0
_drop_index(engine, "states", "states__state_changes")
_drop_index(engine, "states", "states__significant_changes")
_drop_index(engine, "states", "ix_states_entity_id_created")
_create_index(engine, "states", "ix_states_entity_id_last_updated")
elif new_version == 5:
# Create supporting index for States.event_id foreign key
_create_index(engine, "states", "ix_states_event_id")
elif new_version == 6:
_add_columns(
engine,
"events",
["context_id CHARACTER(36)", "context_user_id CHARACTER(36)"],
)
_create_index(engine, "events", "ix_events_context_id")
_create_index(engine, "events", "ix_events_context_user_id")
_add_columns(
engine,
"states",
["context_id CHARACTER(36)", "context_user_id CHARACTER(36)"],
)
_create_index(engine, "states", "ix_states_context_id")
_create_index(engine, "states", "ix_states_context_user_id")
elif new_version == 7:
_create_index(engine, "states", "ix_states_entity_id")
elif new_version == 8:
_add_columns(engine, "events", ["context_parent_id CHARACTER(36)"])
_add_columns(engine, "states", ["old_state_id INTEGER"])
_create_index(engine, "events", "ix_events_context_parent_id")
elif new_version == 9:
# We now get the context from events with a join
# since its always there on state_changed events
#
# Ideally we would drop the columns from the states
# table as well but sqlite doesn't support that
# and we would have to move to something like
# sqlalchemy alembic to make that work
#
_drop_index(engine, "states", "ix_states_context_id")
_drop_index(engine, "states", "ix_states_context_user_id")
# This index won't be there if they were not running
# nightly but we don't treat that as a critical issue
_drop_index(engine, "states", "ix_states_context_parent_id")
# Redundant keys on composite index:
# We already have ix_states_entity_id_last_updated
_drop_index(engine, "states", "ix_states_entity_id")
_create_index(engine, "events", "ix_events_event_type_time_fired")
_drop_index(engine, "events", "ix_events_event_type")
elif new_version == 10:
# Now done in step 11
pass
elif new_version == 11:
_create_index(engine, "states", "ix_states_old_state_id")
_update_states_table_with_foreign_key_options(engine)
elif new_version == 12:
if engine.dialect.name == "mysql":
_modify_columns(engine, "events", ["event_data LONGTEXT"])
_modify_columns(engine, "states", ["attributes LONGTEXT"])
elif new_version == 13:
if engine.dialect.name == "mysql":
_modify_columns(engine, "events", ["time_fired DATETIME(6)", "created DATETIME(6)"])
_modify_columns(engine, "states", ["last_changed DATETIME(6)", "last_updated DATETIME(6)", "created DATETIME(6)"])
else:
raise ValueError(f"No schema migration defined for version {new_version}")
|
12,060 |
def test_dry_run_noisy_option(runner):
with open("requirements", "w"):
pass
out = runner.invoke(cli, ["--dry-run", "-n", "requirements"])
# Dry-run massage has been written to output
assert "Dry-run, so nothing updated." in out.stderr.strip()
|
def test_dry_run_noisy_option(runner):
with open("requirements", "w"):
pass
out = runner.invoke(cli, ["--dry-run", "-n", "requirements"])
# Dry-run message has been written to output
assert "Dry-run, so nothing updated." in out.stderr.strip()
|
46,275 |
def test_wrong_end_control_point():
"""Test wrong start of control points raises an error."""
colors = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
with pytest.raises(ValueError):
Colormap(colors, name='testing', controls=[0, 0.75, 0.9])
|
def test_wrong_end_control_point():
"""Test wrong end of control points raises an error."""
colors = np.array([[0, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]])
with pytest.raises(ValueError):
Colormap(colors, name='testing', controls=[0, 0.75, 0.9])
|
6,417 |
def execute():
purchase_receipts = frappe.db.sql("""
SELECT
parent from `tabPurchase Receipt Item`
WHERE
material_request is not null
AND docstatus=1
""",as_dict=1)
purchase_receipts = set([d.parent for d in purchase_receipts])
for pr in purchase_receipts:
doc = frappe.get_doc("Purchase Receipt", pr)
doc.status_updater = [
{
'source_dt': 'Purchase Receipt Item',
'target_dt': 'Material Request Item',
'join_field': 'material_request_item',
'target_field': 'received_qty',
'target_parent_dt': 'Material Request',
'target_parent_field': 'per_received',
'target_ref_field': 'qty',
'source_field': 'stock_qty',
'percent_join_field': 'material_request'
}
]
doc.update_prevdoc_status()
|
def execute():
purchase_receipts = frappe.db.sql("""
SELECT
parent from `tabPurchase Receipt Item`
WHERE
material_request is not null
AND docstatus=1
""",as_dict=1)
purchase_receipts = set([d.parent for d in purchase_receipts])
for pr in purchase_receipts:
doc = frappe.get_doc("Purchase Receipt", pr)
doc.status_updater = [
{
'source_dt': 'Purchase Receipt Item',
'target_dt': 'Material Request Item',
'join_field': 'material_request_item',
'target_field': 'received_qty',
'target_parent_dt': 'Material Request',
'target_parent_field': 'per_received',
'target_ref_field': 'qty',
'source_field': 'stock_qty',
'percent_join_field': 'material_request'
}
]
doc.update_qty()
|
56,348 |
def test_fixture_param_shadowing(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=['a', 'b'])
def argroot(request):
return request.param
@pytest.fixture
def arg(argroot):
return argroot
@pytest.mark.parametrize("arg", [1])
def test_simple(arg, request):
assert arg == 1
"""
)
result = testdir.runpytest()
# Only one test should have run
result.assert_outcomes(passed=1)
|
def test_fixture_param_shadowing(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=['a', 'b'])
def argroot(request):
return request.param
@pytest.fixture
def arg(argroot):
return argroot
@pytest.mark.parametrize("arg", [1])
def test_simple(arg, request):
assert arg == 1
"""
)
result = testdir.runpytest()
# Only one test should have run
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(["::test_simple[1]"])
|
24,875 |
def check_config_3(machine, old_conf, new_conf):
"""
Example code must not trigger the message,
Given an if-elif construct
When the body of the if ends with a function call
Then no message shall be triggered.
Note: There is nothing special about the body ending with a function call.
This is just taken as a representative value for the equivalence class of
"every node class unrelated to if/elif/else".
"""
if old_conf:
if not new_conf:
machine.disable()
elif old_conf.value != new_conf.value:
machine.disable()
machine.enable(new_conf.value)
print("Processed old configuration...")
elif new_conf:
machine.enable(new_conf.value)
|
def not_triggered_if_indentend_block_ends_with_call(machine, old_conf, new_conf):
"""
Example code must not trigger the message,
Given an if-elif construct
When the body of the if ends with a function call
Then no message shall be triggered.
Note: There is nothing special about the body ending with a function call.
This is just taken as a representative value for the equivalence class of
"every node class unrelated to if/elif/else".
"""
if old_conf:
if not new_conf:
machine.disable()
elif old_conf.value != new_conf.value:
machine.disable()
machine.enable(new_conf.value)
print("Processed old configuration...")
elif new_conf:
machine.enable(new_conf.value)
|
30,631 |
def fetch_incidents(client, last_run):
""" Callback to fetch incidents periodically """
last_fetch_time = last_run.get('last_fetch', datetime.utcnow().timestamp() - 60)
site, concentrator, map = get_site_params()
params = demisto.params()
tags = params.get('tags') or None
event_types = params.get('event_types')
zone_events = []
if event_types is None or 'zone_event' in event_types:
zone_events = client.get_zone_events(concentrator=concentrator, map=map,
site=site, tags=tags, since=last_fetch_time)
device_events = []
if event_types is None or 'device_event' in event_types:
device_events = client.get_device_events(concentrator=concentrator, map=map,
site=site, tags=tags,
since=last_fetch_time)
events = zone_events + device_events
incidents = []
for event in events:
event_time = int(event['time_s'])
incident = {
'name': event['event_type'],
'occurred': datetime.utcfromtimestamp(event_time).strftime(
DATE_FORMAT),
'rawJSON': json.dumps(event),
}
incidents.append(incident)
if event_time > last_fetch_time:
last_fetch_time = event_time
next_run = {'last_fetch': last_fetch_time}
return next_run, incidents
|
def fetch_incidents(client, last_run):
""" Callback to fetch incidents periodically """
last_fetch_time = last_run.get('last_fetch', datetime.utcnow().timestamp() - NEW-PARAM)
site, concentrator, map = get_site_params()
params = demisto.params()
tags = params.get('tags') or None
event_types = params.get('event_types')
zone_events = []
if event_types is None or 'zone_event' in event_types:
zone_events = client.get_zone_events(concentrator=concentrator, map=map,
site=site, tags=tags, since=last_fetch_time)
device_events = []
if event_types is None or 'device_event' in event_types:
device_events = client.get_device_events(concentrator=concentrator, map=map,
site=site, tags=tags,
since=last_fetch_time)
events = zone_events + device_events
incidents = []
for event in events:
event_time = int(event['time_s'])
incident = {
'name': event['event_type'],
'occurred': datetime.utcfromtimestamp(event_time).strftime(
DATE_FORMAT),
'rawJSON': json.dumps(event),
}
incidents.append(incident)
if event_time > last_fetch_time:
last_fetch_time = event_time
next_run = {'last_fetch': last_fetch_time}
return next_run, incidents
|
47,153 |
def _monkey_patch_tensor_methods_for_model_recording(model, method_names):
"""
Helper function that patchs torch.Tensor methods (specified by the method_names list) to record model inference
before symbolic tracing.
"""
cache_names = dict()
original_methods = dict()
for method_name in method_names:
cache_name = f"cache_{method_name}"
cache_names[method_name] = cache_name
if not hasattr(torch.Tensor, method_name):
print(f"torch.Tensor has no method called {method_name}, skipping patching.")
continue
original_methods[method_name] = getattr(torch.Tensor, method_name)
setattr(torch.Tensor, method_name, _wrap_method_for_model_recording(model, method_name, cache_name))
if method_name == "size":
original_methods["shape"] = torch.Tensor.shape
setattr(torch.Tensor, "shape", property(getattr(torch.Tensor, method_name)))
return cache_names, original_methods
|
def _monkey_patch_tensor_methods_for_model_recording(model, method_names):
"""
Helper function that patches torch.Tensor methods (specified by the method_names list) to record model inference
before symbolic tracing.
"""
cache_names = dict()
original_methods = dict()
for method_name in method_names:
cache_name = f"cache_{method_name}"
cache_names[method_name] = cache_name
if not hasattr(torch.Tensor, method_name):
print(f"torch.Tensor has no method called {method_name}, skipping patching.")
continue
original_methods[method_name] = getattr(torch.Tensor, method_name)
setattr(torch.Tensor, method_name, _wrap_method_for_model_recording(model, method_name, cache_name))
if method_name == "size":
original_methods["shape"] = torch.Tensor.shape
setattr(torch.Tensor, "shape", property(getattr(torch.Tensor, method_name)))
return cache_names, original_methods
|
39,654 |
def large_request(start_dt,end_dt,d1,d2,step,verbose):
"""
break start and end date into smaller increments, collecting all data in small chunks and appending all results to a common dataframe
end_dt is the date strings for the final day of the query
d1 and d2 are datetime objects for first and last day of query, for doing date math
a third datetime object (d) will be used to increment over time for the several intermediate queries
"""
error_counter = 0 # count failed requests. If > X, break
no_success_msg_flag = False # a flag for passing over the success message of requests are failing
print("This is a large query, it may take a moment to complete")
dataframe_list = []
#step = 3 # number of days per mini-query (test this later to see how large I can make this without losing data)
d = d1 + datetime.timedelta(days=step)
while d <= d2: #while intermediate query end_dt <= global query end_dt, keep looping
# dates before 3/15 and after 11/15 will always be offseason
# if these dates are detected, check if the next season is within the user's query
# if yes, fast-forward to the next season to avoid empty requests
# if no, break the loop. all useful data has been pulled.
if d.month < 4 and d.day < 15:
print('Skipping offseason dates')
d1 = d1.replace(month=3,day=15,year=d1.year)
d = d1 + datetime.timedelta(days=step+1)
elif d1.month > 10 and d1.day > 14:
if d2.year > d.year:
print('Skipping offseason dates')
d1 = d1.replace(month=3,day=15,year=d1.year+1)
d = d1 + datetime.timedelta(days=step+1)
else:
break
start_dt = d1.strftime('%Y-%m-%d')
intermediate_end_dt = d.strftime('%Y-%m-%d')
data = small_request(start_dt,intermediate_end_dt)
# append to list of dataframes if not empty or failed (failed requests have one row saying "Error: Query Timeout")
if data.shape[0] > 1:
dataframe_list.append(data)
# if it failed, retry up to three times
else:
success = 0
while success == 0:
data = small_request(start_dt,intermediate_end_dt)
if data.shape[0] > 1:
dataframe_list.append(data)
success = 1
else:
error_counter += 1
if error_counter > 2:
# this request is probably too large. Cut a day off of this request and make that its own separate request.
# For each, append to dataframe list if successful, skip and print error message if failed
tmp_end = d - datetime.timedelta(days=1)
tmp_end = tmp_end.strftime('%Y-%m-%d')
smaller_data_1 = small_request(start_dt, tmp_end)
smaller_data_2 = small_request(intermediate_end_dt,intermediate_end_dt)
if smaller_data_1.shape[0] > 1:
dataframe_list.append(smaller_data_1)
print("Completed sub-query from {} to {}".format(start_dt,tmp_end))
else:
print("Query unsuccessful for data from {} to {}. Skipping these dates.".format(start_dt,tmp_end))
if smaller_data_2.shape[0] > 1:
dataframe_list.append(smaller_data_2)
print("Completed sub-query from {} to {}".format(intermediate_end_dt,intermediate_end_dt))
else:
print("Query unsuccessful for data from {} to {}. Skipping these dates.".format(intermediate_end_dt,intermediate_end_dt))
no_success_msg_flag = True # flag for passing over the success message since this request failed
error_counter = 0 # reset counter
break
if verbose:
if no_success_msg_flag is False:
print("Completed sub-query from {} to {}".format(start_dt,intermediate_end_dt))
else:
no_success_msg_flag = False # if failed, reset this flag so message will send again next iteration
# increment dates
d1 = d + datetime.timedelta(days=1)
d = d + datetime.timedelta(days=step+1)
# if start date > end date after being incremented, the loop captured each date's data
if d1 > d2:
pass
# if start date <= end date, then there are a few leftover dates to grab data for.
else:
# start_dt from the earlier loop will work, but instead of d we now want the original end_dt
start_dt = d1.strftime('%Y-%m-%d')
data = small_request(start_dt,end_dt)
dataframe_list.append(data)
if verbose:
print("Completed sub-query from {} to {}".format(start_dt,end_dt))
# concatenate all dataframes into final result set
final_data = pd.concat(dataframe_list, axis=0)
return final_data
|
def large_request(start_dt,end_dt,d1,d2,step,verbose):
"""
break start and end date into smaller increments, collecting all data in small chunks and appending all results to a common dataframe
end_dt is the date strings for the final day of the query
d1 and d2 are datetime objects for first and last day of query, for doing date math
a third datetime object (d) will be used to increment over time for the several intermediate queries
"""
error_counter = 0 # count failed requests. If > X, break
no_success_msg_flag = False # a flag for passing over the success message of requests are failing
print("This is a large query, it may take a moment to complete")
dataframe_list = []
#step = 3 # number of days per mini-query (test this later to see how large I can make this without losing data)
d = d1 + datetime.timedelta(days=step)
while d <= d2: #while intermediate query end_dt <= global query end_dt, keep looping
# dates before 3/15 and after 11/15 will always be offseason
# if these dates are detected, check if the next season is within the user's query
# if yes, fast-forward to the next season to avoid empty requests
# if no, break the loop. all useful data has been pulled.
if (d.month == 3 and d.day < 15) or d.month <= 2:
print('Skipping offseason dates')
d1 = d1.replace(month=3,day=15,year=d1.year)
d = d1 + datetime.timedelta(days=step+1)
elif d1.month > 10 and d1.day > 14:
if d2.year > d.year:
print('Skipping offseason dates')
d1 = d1.replace(month=3,day=15,year=d1.year+1)
d = d1 + datetime.timedelta(days=step+1)
else:
break
start_dt = d1.strftime('%Y-%m-%d')
intermediate_end_dt = d.strftime('%Y-%m-%d')
data = small_request(start_dt,intermediate_end_dt)
# append to list of dataframes if not empty or failed (failed requests have one row saying "Error: Query Timeout")
if data.shape[0] > 1:
dataframe_list.append(data)
# if it failed, retry up to three times
else:
success = 0
while success == 0:
data = small_request(start_dt,intermediate_end_dt)
if data.shape[0] > 1:
dataframe_list.append(data)
success = 1
else:
error_counter += 1
if error_counter > 2:
# this request is probably too large. Cut a day off of this request and make that its own separate request.
# For each, append to dataframe list if successful, skip and print error message if failed
tmp_end = d - datetime.timedelta(days=1)
tmp_end = tmp_end.strftime('%Y-%m-%d')
smaller_data_1 = small_request(start_dt, tmp_end)
smaller_data_2 = small_request(intermediate_end_dt,intermediate_end_dt)
if smaller_data_1.shape[0] > 1:
dataframe_list.append(smaller_data_1)
print("Completed sub-query from {} to {}".format(start_dt,tmp_end))
else:
print("Query unsuccessful for data from {} to {}. Skipping these dates.".format(start_dt,tmp_end))
if smaller_data_2.shape[0] > 1:
dataframe_list.append(smaller_data_2)
print("Completed sub-query from {} to {}".format(intermediate_end_dt,intermediate_end_dt))
else:
print("Query unsuccessful for data from {} to {}. Skipping these dates.".format(intermediate_end_dt,intermediate_end_dt))
no_success_msg_flag = True # flag for passing over the success message since this request failed
error_counter = 0 # reset counter
break
if verbose:
if no_success_msg_flag is False:
print("Completed sub-query from {} to {}".format(start_dt,intermediate_end_dt))
else:
no_success_msg_flag = False # if failed, reset this flag so message will send again next iteration
# increment dates
d1 = d + datetime.timedelta(days=1)
d = d + datetime.timedelta(days=step+1)
# if start date > end date after being incremented, the loop captured each date's data
if d1 > d2:
pass
# if start date <= end date, then there are a few leftover dates to grab data for.
else:
# start_dt from the earlier loop will work, but instead of d we now want the original end_dt
start_dt = d1.strftime('%Y-%m-%d')
data = small_request(start_dt,end_dt)
dataframe_list.append(data)
if verbose:
print("Completed sub-query from {} to {}".format(start_dt,end_dt))
# concatenate all dataframes into final result set
final_data = pd.concat(dataframe_list, axis=0)
return final_data
|
45,628 |
def colors_array_layout(data):
"""Creates an array of colors that is used to specify the layout datum colors.
The colors are based on a gradient between two specified colors. In this case
they are "red" and "blue". Simply change these colors if you'd like a different
gradient.
"""
rows = len(data.index)
input_color = Color("red")
color_array = list(input_color.range_to(Color("blue"), rows))
color_array = [color_iter.hex for color_iter in color_array]
return color_array
|
def colors_array_layout(data):
"""Create an array of colors that is used to specify the layout datum colors.
The colors are based on a gradient between two specified colors. In this case
they are "red" and "blue". Simply change these colors if you'd like a different
gradient.
"""
rows = len(data.index)
input_color = Color("red")
color_array = list(input_color.range_to(Color("blue"), rows))
color_array = [color_iter.hex for color_iter in color_array]
return color_array
|
45,769 |
def test_patchembed():
B = 2
H = 3
W = 4
C = 3
embed_dims = 10
kernel_size = 3
stride = 1
dummy_input = torch.rand(B, C, H, W)
patch_merge_1 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
conv_type=None,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=1,
norm_cfg=None,
)
x1, shape = patch_merge_1(dummy_input)
assert x1.shape == (2, 2, 10)
assert shape == (1, 2)
assert shape[0] * shape[1] == x1.shape[1]
B = 2
H = 10
W = 10
C = 3
embed_dims = 10
kernel_size = 5
stride = 2
dummy_input = torch.rand(B, C, H, W)
# test dilation
patch_merge_2 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
conv_type=None,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=None,
)
x2, shape = patch_merge_2(dummy_input)
assert x2.shape == (2, 1, 10)
assert shape == (1, 1)
assert shape[0] * shape[1] == x2.shape[1]
stride = 2
input_size = (10, 10)
dummy_input = torch.rand(B, C, H, W)
# test stride and norm
patch_merge_3 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
conv_type=None,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=dict(type='LN'),
input_size=input_size)
x3, shape = patch_merge_3(dummy_input)
assert x3.shape == (2, 1, 10)
assert shape == (1, 1)
assert shape[0] * shape[1] == x3.shape[1]
assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 -
1) // 2 + 1
assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 -
1) // 2 + 1
|
def test_patch_embed():
B = 2
H = 3
W = 4
C = 3
embed_dims = 10
kernel_size = 3
stride = 1
dummy_input = torch.rand(B, C, H, W)
patch_merge_1 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
conv_type=None,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=1,
norm_cfg=None,
)
x1, shape = patch_merge_1(dummy_input)
assert x1.shape == (2, 2, 10)
assert shape == (1, 2)
assert shape[0] * shape[1] == x1.shape[1]
B = 2
H = 10
W = 10
C = 3
embed_dims = 10
kernel_size = 5
stride = 2
dummy_input = torch.rand(B, C, H, W)
# test dilation
patch_merge_2 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
conv_type=None,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=None,
)
x2, shape = patch_merge_2(dummy_input)
assert x2.shape == (2, 1, 10)
assert shape == (1, 1)
assert shape[0] * shape[1] == x2.shape[1]
stride = 2
input_size = (10, 10)
dummy_input = torch.rand(B, C, H, W)
# test stride and norm
patch_merge_3 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
conv_type=None,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=dict(type='LN'),
input_size=input_size)
x3, shape = patch_merge_3(dummy_input)
assert x3.shape == (2, 1, 10)
assert shape == (1, 1)
assert shape[0] * shape[1] == x3.shape[1]
assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 -
1) // 2 + 1
assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 -
1) // 2 + 1
|
30,816 |
def update_user_command(client, args):
"""
Update user using PATCH to Servicenow API , if Connection to the service is successful.
Args: demisto command line argument
client: Service Client
Returns:
success : success=True, id, email, login as username, details, active status
fail : success=False, id, login as username, errorCod, errorMessage, details
"""
old_scim = verify_and_load_scim_data(args.get('oldScim'))
new_scim = verify_and_load_scim_data(args.get('newScim'))
custom_mapping = demisto.params().get('customMappingUpdateUser')
parsed_old_scim = map_scim(old_scim)
user_id = parsed_old_scim.get('id')
if not (user_id):
raise Exception('You must provide id of the user')
servicenow_user = client.build_servicenow_user_profile(args, new_scim, custom_mapping)
res = client.update_user(user_id, servicenow_user)
res_json = res.json()
if res.status_code == 200:
result = res_json['result']
active = True if result['active'] == 'true' else False
id = result['sys_id']
email = result['email']
username = result['user_name']
generic_iam_context = OutputContext(success=True, iden=id, email=email,
username=username, details=result, active=active)
else:
generic_iam_context = OutputContext(success=False, iden=user_id, errorCode=res.status_code,
errorMessage=res_json.get('error', {}).get('message'), details=res_json)
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown('Update ServiceNow User:', t=generic_iam_context.data,
headers=["brand", "instanceName", "success", "active", "id", "username", "email",
"errorCode", "errorMessage", "details"],
removeNull=True
)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
def update_user_command(client, args):
"""
Update user using PATCH to Servicenow API , if Connection to the service is successful.
Args: demisto command line argument
client: Service Client
Returns:
success : success=True, id, email, login as username, details, active status
fail : success=False, id, login as username, errorCod, errorMessage, details
"""
old_scim = verify_and_load_scim_data(args.get('oldScim'))
new_scim = verify_and_load_scim_data(args.get('newScim'))
custom_mapping = demisto.params().get('customMappingUpdateUser')
parsed_old_scim = map_scim(old_scim)
user_id = parsed_old_scim.get('id')
if not user_id:
raise Exception('You must provide id of the user')
servicenow_user = client.build_servicenow_user_profile(args, new_scim, custom_mapping)
res = client.update_user(user_id, servicenow_user)
res_json = res.json()
if res.status_code == 200:
result = res_json['result']
active = True if result['active'] == 'true' else False
id = result['sys_id']
email = result['email']
username = result['user_name']
generic_iam_context = OutputContext(success=True, iden=id, email=email,
username=username, details=result, active=active)
else:
generic_iam_context = OutputContext(success=False, iden=user_id, errorCode=res.status_code,
errorMessage=res_json.get('error', {}).get('message'), details=res_json)
generic_iam_context_dt = f'{generic_iam_context.command}(val.id == obj.id && val.instanceName == obj.instanceName)'
outputs = {
generic_iam_context_dt: generic_iam_context.data
}
readable_output = tableToMarkdown('Update ServiceNow User:', t=generic_iam_context.data,
headers=["brand", "instanceName", "success", "active", "id", "username", "email",
"errorCode", "errorMessage", "details"],
removeNull=True
)
return (
readable_output,
outputs,
generic_iam_context.data
)
|
48,622 |
def split_markdown_front_matter(lines: str) -> Tuple[str, str]:
r"""
Split text into markdown front matter and the markdown body
Return ("", text) for text with non existing front matter
>>> text='''---
... title: DUMMY-SECURITY-2019-001
... description: Incorrect access control.
... cves: [CVE-2042-1337]
... ---
... # Markdown starts here
... '''
>>> split_markdown_front_matter(text)
('title: DUMMY-SECURITY-2019-001\ndescription: Incorrect access control.\ncves: [CVE-2042-1337]', '# Markdown starts here\n')
"""
fmlines = []
mdlines = []
splitter = mdlines
lines = lines.replace("\r\n", "\n")
for index, line in enumerate(lines.split("\n")):
if index == 0 and line.strip().startswith("---"):
splitter = fmlines
elif line.strip().startswith("---"):
splitter = mdlines
else:
splitter.append(line)
return "\n".join(fmlines), "\n".join(mdlines)
|
def split_markdown_front_matter(lines: str) -> Tuple[str, str]:
r"""
Return a tuple of (front matter, markdown body) strings split from ``text``.
Each can be an empty string.
Return ("", text) for text with non existing front matter
>>> text='''---
... title: DUMMY-SECURITY-2019-001
... description: Incorrect access control.
... cves: [CVE-2042-1337]
... ---
... # Markdown starts here
... '''
>>> split_markdown_front_matter(text)
('title: DUMMY-SECURITY-2019-001\ndescription: Incorrect access control.\ncves: [CVE-2042-1337]', '# Markdown starts here\n')
"""
fmlines = []
mdlines = []
splitter = mdlines
lines = lines.replace("\r\n", "\n")
for index, line in enumerate(lines.split("\n")):
if index == 0 and line.strip().startswith("---"):
splitter = fmlines
elif line.strip().startswith("---"):
splitter = mdlines
else:
splitter.append(line)
return "\n".join(fmlines), "\n".join(mdlines)
|
44,198 |
def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
T_{pq} = h_{ij} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`r \in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \
eigenvalues is larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
|
def factorize(two, tol):
r"""Return double-factorized form of a two-electron tensor.
The second quantized electronic Hamiltonian is constructed in terms of fermionic creation,
:math:`a^{\dagger}` , and annihilation, :math:`a`, operators as
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_]
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} h_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
h_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \beta}^{\dagger} a_{r, \beta} a_{s, \alpha},
where :math:`h_{pq}` and :math:`h_{pqrs}` are the one- and two-electron integrals computed as
.. math::
h_{pq} = \int \phi_p(r)^* \left ( -\frac{\nabla_r^2}{2} - \sum_i \frac{Z_i}{|r-R_i|} \right)
\phi_q(r) dr,
and
.. math::
h_{pqrs} = \int \frac{\phi_p(r_1)^* \phi_q(r_2)^* \phi_r(r_2) \phi_s(r_1)}{|r_1 - r_2|}
dr_1 dr_2.
Rearranging the integrals in the chemist notation, [11|22], gives
.. math::
H = \sum_{\alpha \in \{\uparrow, \downarrow \} } \sum_{pq} T_{pq} a_{p,\alpha}^{\dagger}
a_{q, \alpha} + \frac{1}{2} \sum_{\alpha, \beta \in \{\uparrow, \downarrow \} } \sum_{pqrs}
V_{pqrs} a_{p, \alpha}^{\dagger} a_{q, \alpha} a_{r, \beta}^{\dagger} a_{s, \beta}.
with
.. math::
T_{pq} = h_{ij} - \frac{1}{2} \sum_s h_{pssq}.
and :math:`V` is the two-electron tensor in chemist notation.
The objective of the factorization is to find a set of symmetric matrices, :math:`L`, such that
.. math::
V_{ijkl} = \sum_r L_{ij}^{(r)} L_{kl}^{(r) T}.
with the rank :math:`k\in \mathcal{O}(n)`. The matrices :math:`L` are further diagonalized
and truncated in a second level of factorization.
The algorithm has the following steps
[`arXiv:1902.02134 <https://arxiv.org/pdf/1902.02134.pdf>`_].
1. Matricize the :math:`n \times n \times n \times n` two-electron tensor to a \
:math:`n^2 \times n^2` matrix where n is the number of orbitals.
2. Diagonalize the resulting matrix and keep the :math:`r` eigenvectors that have \
corresponding eigenvalues larger than a threshold.
3. Reshape the selected eigenvectors to :math:`n \times n` matrices.
4. Diagonalize the :math:`n \times n` matrices and keep those that the norm of their \
eigenvalues is larger than a threshold.
Args:
two (array[array[float]]): the two-electron repulsion tensor in the molecular orbital basis
arranged in chemist notation [11|22]
tol (float): cutoff value for discarding the negligible factors
Returns:
tuple(array[float]): array of symmetric matrices (factors) approximating the two-electron
tensor, eigenvalues of the generated factors, eigenvectors of the generated factors
**Example**
>>> symbols = ['H', 'H']
>>> geometry = np.array([[0.0, 0.0, 0.0], [0.74, 0.0, 0.0]], requires_grad = False) / 0.5291772
>>> mol = qml.qchem.Molecule(symbols, geometry)
>>> core, one, two = qml.qchem.electron_integrals(mol)()
>>> two = np.swapaxes(two, 1, 3) # convert to chemist's notation
>>> l, w, v = factorize(two, 1e-5)
>>> print(l)
[[[ 1.06723440e-01 9.73575768e-15]
[ 8.36288956e-15 -1.04898533e-01]]
[[-2.20945401e-13 -4.25688222e-01]
[-4.25688222e-01 -2.98228790e-13]]
[[-8.14472856e-01 5.01669019e-13]
[ 5.01689072e-13 -8.28642140e-01]]]
"""
n = two.shape[0]
two = two.reshape(n * n, n * n)
eigvals, eigvecs = np.linalg.eigh(two)
eigvals = np.array([val for val in eigvals if abs(val) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
vectors = eigvecs @ np.diag(np.sqrt(abs(eigvals)))
factors = np.array([vectors.reshape(n, n, len(eigvals))[:, :, r] for r in range(len(eigvals))])
eigvals, eigvecs = np.linalg.eigh(factors)
eigvals = np.array([val for val in eigvals if np.sum(abs(eigvals)) > tol])
eigvecs = eigvecs[:, -len(eigvals) :]
return factors, eigvals, eigvecs
|
15,262 |
def setup(hass, config):
"""Set up the Splunk component."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
token = conf.get(CONF_TOKEN)
use_ssl = conf[CONF_SSL]
verify_ssl = conf.get(CONF_VERIFY_SSL)
name = conf.get(CONF_NAME)
entity_filter = conf[CONF_FILTER]
hec = http_event_collector(token, host, "json", name, port, use_ssl)
hec.SSL_verify = verify_ssl
if not hec.check_connectivity():
_LOGGER.exception("Cannot connect to Splunk")
def splunk_event_listener(event):
"""Listen for new messages on the bus and sends them to Splunk."""
state = event.data.get("new_state")
if state is None or not entity_filter(state.entity_id):
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
payload = {
"time": event.time_fired.timestamp(),
"host": name,
"event": {
"domain": state.domain,
"entity_id": state.object_id,
"attributes": dict(state.attributes),
"value": _state,
},
}
hec.batchEvent(payload)
def splunk_event_flush(event):
hec.flushBatch()
hass.bus.listen(EVENT_STATE_CHANGED, splunk_event_listener)
hass.bus.listen(EVENT_TIME_CHANGED, splunk_event_flush)
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, splunk_event_flush)
return True
|
def setup(hass, config):
"""Set up the Splunk component."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
token = conf.get(CONF_TOKEN)
use_ssl = conf[CONF_SSL]
verify_ssl = conf.get(CONF_VERIFY_SSL)
name = conf.get(CONF_NAME)
entity_filter = conf[CONF_FILTER]
event_collector = http_event_collector(token, host, "json", name, port, use_ssl)
hec.SSL_verify = verify_ssl
if not hec.check_connectivity():
_LOGGER.exception("Cannot connect to Splunk")
def splunk_event_listener(event):
"""Listen for new messages on the bus and sends them to Splunk."""
state = event.data.get("new_state")
if state is None or not entity_filter(state.entity_id):
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
payload = {
"time": event.time_fired.timestamp(),
"host": name,
"event": {
"domain": state.domain,
"entity_id": state.object_id,
"attributes": dict(state.attributes),
"value": _state,
},
}
hec.batchEvent(payload)
def splunk_event_flush(event):
hec.flushBatch()
hass.bus.listen(EVENT_STATE_CHANGED, splunk_event_listener)
hass.bus.listen(EVENT_TIME_CHANGED, splunk_event_flush)
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, splunk_event_flush)
return True
|
27,993 |
def find_files(directory, file_name):
"""Return the list of files with the exact name match under
the given directory.
"""
res = set()
for input_path in directory:
input_path = os.path.abspath(input_path)
if not os.path.exists(input_path):
return []
_, _, files = next(os.walk(input_path), ([], [], []))
for f in files:
if f == file_name:
res.add(os.path.join(input_path, f))
return res
|
def find_files(directory, file_name):
"""Return the list of files with the exact name match under
the given directory.
"""
res = set()
for input_path in directory:
input_path = os.path.abspath(input_path)
if not os.path.exists(input_path):
return res
_, _, files = next(os.walk(input_path), ([], [], []))
for f in files:
if f == file_name:
res.add(os.path.join(input_path, f))
return res
|
31,742 |
def alexa_domain(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
domains = argToList(args.get('domain'))
if not domains:
raise ValueError('AlexaV2: domain doesn\'t exists')
command_results: List[CommandResults] = []
for domain in domains:
result = client.alexa_rank(domain)
domain_res = demisto.get(result,
'Awis.Results.Result.Alexa.TrafficData.DataUrl')
if domain_res == '404': # Not found on alexa
raise DemistoException('Url cannot be found')
rank = demisto.get(result,
'Awis.Results.Result.Alexa.TrafficData.Rank')
domain_standard_context: Common.Domain = rank_to_score(domain=domain_res,
rank=arg_to_number(rank),
threshold=client.threshold,
benign=client.benign,
reliability=client.reliability)
rank: str = rank if rank else 'Unknown'
result = {'Name': domain_res,
'Indicator': domain_res,
'Rank': rank}
table = {'Domain': domain_res,
'Alexa Rank': rank,
'Reputation': DBOT_SCORE_TO_TEXT.get(domain_standard_context.dbot_score.score, 'Unknown')}
readable = tableToMarkdown(f'Alexa Rank for {domain_res}', table, headers=list(table.keys()))
command_results.append(CommandResults(
outputs_prefix='Alexa.Domain',
outputs_key_field='Name',
outputs=result,
readable_output=readable,
indicator=domain_standard_context
))
return command_results
|
def alexa_domain(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
domains = argToList(args.get('domain'))
if not domains:
raise ValueError('AlexaV2: domain doesn\'t exists')
command_results: List[CommandResults] = []
for domain in domains:
result = client.alexa_rank(domain)
domain_res = demisto.get(result,
'Awis.Results.Result.Alexa.TrafficData.DataUrl')
if domain_res == '404': # Not found on alexa
raise DemistoException('Domain cannot be found')
rank = demisto.get(result,
'Awis.Results.Result.Alexa.TrafficData.Rank')
domain_standard_context: Common.Domain = rank_to_score(domain=domain_res,
rank=arg_to_number(rank),
threshold=client.threshold,
benign=client.benign,
reliability=client.reliability)
rank: str = rank if rank else 'Unknown'
result = {'Name': domain_res,
'Indicator': domain_res,
'Rank': rank}
table = {'Domain': domain_res,
'Alexa Rank': rank,
'Reputation': DBOT_SCORE_TO_TEXT.get(domain_standard_context.dbot_score.score, 'Unknown')}
readable = tableToMarkdown(f'Alexa Rank for {domain_res}', table, headers=list(table.keys()))
command_results.append(CommandResults(
outputs_prefix='Alexa.Domain',
outputs_key_field='Name',
outputs=result,
readable_output=readable,
indicator=domain_standard_context
))
return command_results
|
3,005 |
def pp_options_list(keys: List[str], width=80, _print: bool = False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name: str, ks: Iterable[str]) -> List[str]:
pfx = "- " + name + ".[" if name else ""
ls = wrap(
", ".join(ks),
width,
initial_indent=pfx,
subsequent_indent=" ",
break_long_words=False,
)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + "]"
return ls
ls: List[str] = []
singles = [x for x in sorted(keys) if x.find(".") < 0]
if singles:
ls += pp("", singles)
keys = [x for x in keys if x.find(".") >= 0]
for k, g in groupby(sorted(keys), lambda x: x[: x.rfind(".")]):
ks = [x[len(k) + 1 :] for x in list(g)]
ls += pp(k, ks)
s = "\n".join(ls)
if _print:
print(s)
else:
return s
|
def pp_options_list(keys: Iterable[str], width=80, _print: bool = False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name: str, ks: Iterable[str]) -> List[str]:
pfx = "- " + name + ".[" if name else ""
ls = wrap(
", ".join(ks),
width,
initial_indent=pfx,
subsequent_indent=" ",
break_long_words=False,
)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + "]"
return ls
ls: List[str] = []
singles = [x for x in sorted(keys) if x.find(".") < 0]
if singles:
ls += pp("", singles)
keys = [x for x in keys if x.find(".") >= 0]
for k, g in groupby(sorted(keys), lambda x: x[: x.rfind(".")]):
ks = [x[len(k) + 1 :] for x in list(g)]
ls += pp(k, ks)
s = "\n".join(ls)
if _print:
print(s)
else:
return s
|
57,920 |
def change_user_status_command(client: FileOrbisClient, args: Dict[str, Any]) -> CommandResults:
user_id: str = args.get('user_id') # type:ignore
status: int = int(args.get('status')) # type:ignore
client.login()
result = client.change_user_status(user_id=user_id, status=status)
client.logout()
return CommandResults(
readable_output=result.get("Message"),
outputs=result,
)
|
def change_user_status_command(client: FileOrbisClient, args: Dict[str, Any]) -> CommandResults:
user_id: str = args.get('user_id') # type:ignore
status: int = int(args.get('status')) # type:ignore
client.login()
result = client.change_user_status(user_id=user_id, status=status)
client.logout()
return CommandResults(
readable_output=result.get("Message"),
outputs=result,
outputs_prefix='FileOrbis.UserStatus',
outputs_key_field='UserID',
raw_response=result
)
|
32,125 |
def validate_vm_export_args(args: Dict[str, Any]):
"""
To validate arguments of rubrik-gps-vm-export.
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: Validated arguments for rubrik-gps-vm-export
"""
vm_name = args.get("vm_name")
object_id = validate_required_arg("object_id", args.get("object_id", ""))
snapshot_id = validate_required_arg("snapshot_id", args.get("snapshot_id", ""))
datastore_id = validate_required_arg("datastore_id", args.get("datastore_id", ""))
host_id = args.get("host_id", None)
host_compute_cluster_id = args.get('host_compute_cluster_id', None)
if not host_id and not host_compute_cluster_id:
raise ValueError(ERROR_MESSAGES['MISSING_EXPORT_DESTINATION'])
power_on = args.get("power_on")
if power_on:
power_on = validate_boolean_argument(power_on, "power_on")
keep_mac_addresses = args.get("keep_mac_addresses")
if keep_mac_addresses:
keep_mac_addresses = validate_boolean_argument(keep_mac_addresses, "keep_mac_addresses")
remove_network_devices = args.get("remove_network_devices")
if remove_network_devices:
remove_network_devices = validate_boolean_argument(remove_network_devices, "remove_network_devices")
recover_tags = args.get("recover_tags")
if recover_tags:
recover_tags = validate_boolean_argument(recover_tags, "recover_tags")
disable_network = args.get("disable_network")
if disable_network:
disable_network = validate_boolean_argument(disable_network, "disable_network")
config = {
"datastoreId": datastore_id,
"hostId": host_id,
"clusterId": host_compute_cluster_id,
"shouldRecoverTags": recover_tags,
"mountExportSnapshotJobCommonOptionsV2": {
"keepMacAddresses": keep_mac_addresses,
"removeNetworkDevices": remove_network_devices,
"vmName": vm_name,
"powerOn": power_on,
"disableNetwork": disable_network
},
"requiredRecoveryParameters": {
"snapshotId": snapshot_id
}
}
return remove_empty_elements(config), object_id
|
def validate_vm_export_args(args: Dict[str, Any]):
"""
To validate arguments of rubrik-gps-vm-export.
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: Validated arguments for rubrik-gps-vm-export
"""
vm_name = args.get("vm_name")
object_id = validate_required_arg("object_id", args.get("object_id", ""))
snapshot_id = validate_required_arg("snapshot_id", args.get("snapshot_id", ""))
datastore_id = validate_required_arg("datastore_id", args.get("datastore_id", ""))
host_id = args.get("host_id", None)
host_compute_cluster_id = args.get('host_compute_cluster_id', None)
if not host_id and not host_compute_cluster_id:
raise ValueError(ERROR_MESSAGES['MISSING_EXPORT_DESTINATION'])
power_on = argToBoolean(args.get("power_on"))
keep_mac_addresses = args.get("keep_mac_addresses")
if keep_mac_addresses:
keep_mac_addresses = validate_boolean_argument(keep_mac_addresses, "keep_mac_addresses")
remove_network_devices = args.get("remove_network_devices")
if remove_network_devices:
remove_network_devices = validate_boolean_argument(remove_network_devices, "remove_network_devices")
recover_tags = args.get("recover_tags")
if recover_tags:
recover_tags = validate_boolean_argument(recover_tags, "recover_tags")
disable_network = args.get("disable_network")
if disable_network:
disable_network = validate_boolean_argument(disable_network, "disable_network")
config = {
"datastoreId": datastore_id,
"hostId": host_id,
"clusterId": host_compute_cluster_id,
"shouldRecoverTags": recover_tags,
"mountExportSnapshotJobCommonOptionsV2": {
"keepMacAddresses": keep_mac_addresses,
"removeNetworkDevices": remove_network_devices,
"vmName": vm_name,
"powerOn": power_on,
"disableNetwork": disable_network
},
"requiredRecoveryParameters": {
"snapshotId": snapshot_id
}
}
return remove_empty_elements(config), object_id
|
14,778 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor."""
name = config.get(CONF_NAME)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
client = AtomeClient(username, password)
except PyAtomeError as exp:
_LOGGER.error(exp)
return False
# finally:
# client.close_session()
add_entities([AtomeSensor(name, client)])
return True
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor."""
name = config.get(CONF_NAME)
username = config[CONF_USERNAME]
password = config.get(CONF_PASSWORD)
try:
client = AtomeClient(username, password)
except PyAtomeError as exp:
_LOGGER.error(exp)
return False
# finally:
# client.close_session()
add_entities([AtomeSensor(name, client)])
return True
|
33,548 |
def _encrypt(
key_id: str, key_material: str, plaintext: str, encryption_context: EncryptionContextType
) -> str:
if plaintext == b"":
raise ValidationException(
"1 validation error detected: Value at 'plaintext' failed to satisfy constraint: Member must have length greater than or equal to 1"
)
iv = os.urandom(IV_LEN)
aad = _serialize_encryption_context(encryption_context=encryption_context)
encryptor = Cipher(
algorithms.AES(key_material), modes.GCM(iv), backend=default_backend()
).encryptor()
encryptor.authenticate_additional_data(aad)
ciphertext = encryptor.update(plaintext) + encryptor.finalize()
return _serialize_ciphertext_blob(
ciphertext=Ciphertext(key_id=key_id, iv=iv, ciphertext=ciphertext, tag=encryptor.tag)
)
|
def _encrypt(
key_id: str, key_material: str, plaintext: bytes, encryption_context: EncryptionContextType
) -> str:
if plaintext == b"":
raise ValidationException(
"1 validation error detected: Value at 'plaintext' failed to satisfy constraint: Member must have length greater than or equal to 1"
)
iv = os.urandom(IV_LEN)
aad = _serialize_encryption_context(encryption_context=encryption_context)
encryptor = Cipher(
algorithms.AES(key_material), modes.GCM(iv), backend=default_backend()
).encryptor()
encryptor.authenticate_additional_data(aad)
ciphertext = encryptor.update(plaintext) + encryptor.finalize()
return _serialize_ciphertext_blob(
ciphertext=Ciphertext(key_id=key_id, iv=iv, ciphertext=ciphertext, tag=encryptor.tag)
)
|
42,455 |
def is_simple_decorator_expression(node: LN) -> bool:
"""Return True iff `node` is the 'dotted name' in the grammar @ dotted_name [arguments] NEWLINE"""
if node.type == token.NAME:
return True
if node.type == syms.power:
if node.children:
return (
node.children[0].type == token.NAME
and all(map(is_simple_decorator_trailer, node.children[1:-1]))
and (
len(node.children) < 2
or is_simple_decorator_trailer(node.children[-1], last=True)
)
)
return False
|
def is_simple_decorator_expression(node: LN) -> bool:
"""Return True iff `node` is the 'namedexpr_test' in the grammar @ namedexpr_test [arguments] NEWLINE"""
if node.type == token.NAME:
return True
if node.type == syms.power:
if node.children:
return (
node.children[0].type == token.NAME
and all(map(is_simple_decorator_trailer, node.children[1:-1]))
and (
len(node.children) < 2
or is_simple_decorator_trailer(node.children[-1], last=True)
)
)
return False
|