id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
42,934 |
def plot(graph: nx.Graph, subgraph: Optional[list] = None, size: float = 500) -> None:
""" Creates a plot.ly plot of the input graph.
The graph layout is fixed to be the Kamada-Kawai layout with an aspect ratio of 1:1. The
function can plot just the input graph or the graph with a specified subgraph highlighted.
The function uses the standard colour theme of green nodes, grey edges, and red highlighted
subgraph.
**Example usage**:
>>> graph = nx.complete_graph(10)
>>> fig = plot(graph, [0, 1, 2, 3])
>>> fig.show()
Args:
graph (nx.Graph): input graph
subgraph (list): list of nodes comprising the subgraph to highlight
size (dict): size of the plot
Returns:
plot.ly graph object
"""
s = graph.subgraph(subgraph)
l = nx.kamada_kawai_layout(graph)
g_nodes = go.Scatter(
**_node_coords(graph, l),
mode='markers',
hoverinfo='text',
marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2)
)
g_edges = go.Scatter(
**edge_coords(graph, l),
line=dict(width=1, color=graph_edge_colour),
hoverinfo='none',
mode='lines'
)
g_nodes.text = [str(i) for i in graph.nodes()]
layout = go.Layout(showlegend=False, hovermode='closest',
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
margin=dict(b=0, l=0, r=0, t=25),
height=size,
width=size,
plot_bgcolor='#ffffff'
)
if subgraph:
s_edges = go.Scatter(
**edge_coords(s, l),
line=dict(width=2, color=subgraph_edge_colour),
hoverinfo='none',
mode='lines'
)
s_nodes = go.Scatter(
**_node_coords(s, l),
mode='markers',
hoverinfo='text',
marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2)
)
s_nodes.text = [str(i) for i in s.nodes()]
f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout)
else:
f = go.Figure(data=[g_edges, g_nodes], layout=layout)
return f
|
def plot(graph: nx.Graph, subgraph: Optional[list] = None, size: float = 500) -> None:
""" Creates a plot.ly plot of the input graph.
The graph layout is fixed to be the Kamada-Kawai layout with an aspect ratio of 1:1. The
function can plot just the input graph or the graph with a specified subgraph highlighted.
The function uses the standard colour theme of green nodes, grey edges, and red highlighted
subgraph.
**Example usage**:
>>> graph = nx.complete_graph(10)
>>> fig = plot(graph, [0, 1, 2, 3])
>>> fig.show()
Args:
graph (nx.Graph): input graph
subgraph (list): optional list of nodes comprising the subgraph to highlight
size (dict): size of the plot
Returns:
plot.ly graph object
"""
s = graph.subgraph(subgraph)
l = nx.kamada_kawai_layout(graph)
g_nodes = go.Scatter(
**_node_coords(graph, l),
mode='markers',
hoverinfo='text',
marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2)
)
g_edges = go.Scatter(
**edge_coords(graph, l),
line=dict(width=1, color=graph_edge_colour),
hoverinfo='none',
mode='lines'
)
g_nodes.text = [str(i) for i in graph.nodes()]
layout = go.Layout(showlegend=False, hovermode='closest',
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
margin=dict(b=0, l=0, r=0, t=25),
height=size,
width=size,
plot_bgcolor='#ffffff'
)
if subgraph:
s_edges = go.Scatter(
**edge_coords(s, l),
line=dict(width=2, color=subgraph_edge_colour),
hoverinfo='none',
mode='lines'
)
s_nodes = go.Scatter(
**_node_coords(s, l),
mode='markers',
hoverinfo='text',
marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2)
)
s_nodes.text = [str(i) for i in s.nodes()]
f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout)
else:
f = go.Figure(data=[g_edges, g_nodes], layout=layout)
return f
|
29,932 |
def is_dem_source_available(source, lon_ex, lat_ex):
"""Checks if a DEM source is available for your purpose.
This is only a very rough check! It doesn't mean that the data really is
available, but at least it's worth a try.
Parameters
----------
source : str, required
the source you want to check for
lon_ex : tuple or int, required
a (min_lon, max_lon) tuple delimiting the requested area longitudes
lat_ex : tuple or int, required
a (min_lat, max_lat) tuple delimiting the requested area latitudes
Returns
-------
True or False
"""
from oggm.utils import tolist
lon_ex = tolist(lon_ex, length=2)
lat_ex = tolist(lat_ex, length=2)
def _in_grid(grid_json, lon, lat):
i, j = cfg.DATA['dem_grids'][grid_json].transform(lon, lat,
maskout=True)
return np.all(~ (i.mask | j.mask))
if source == 'GIMP':
return _in_grid('gimpdem_90m_v01.1.json', lon_ex, lat_ex)
elif source == 'ARCTICDEM':
return _in_grid('arcticdem_mosaic_100m_v3.0.json', lon_ex, lat_ex)
elif source == 'RAMP':
return _in_grid('AntarcticDEM_wgs84.json', lon_ex, lat_ex)
elif source == 'REMA':
return _in_grid('REMA_100m_dem.json', lon_ex, lat_ex)
elif source == 'ALASKA':
return _in_grid('Alaska_albers_V3.json', lon_ex, lat_ex)
elif source == 'TANDEM':
return True
elif source == 'AW3D30':
return np.min(lat_ex) > -60
elif source == 'MAPZEN':
return True
elif source == 'DEM3':
return True
elif source == 'ASTER':
return True
elif source == 'SRTM':
return np.max(np.abs(lat_ex)) < 60
elif (source == 'COPDEM30') or (source == 'COPDEM90'):
return True
elif source == 'NASADEM':
return (np.min(lat_ex) > -56) and (np.max(lat_ex) < 60)
elif source == 'USER':
return True
elif source is None:
return True
|
def is_dem_source_available(source, lon_ex, lat_ex):
"""Checks if a DEM source is available for your purpose.
This is only a very rough check! It doesn't mean that the data really is
available, but at least it's worth a try.
Parameters
----------
source : str, required
the source you want to check for
lon_ex : tuple or int, required
a (min_lon, max_lon) tuple delimiting the requested area longitudes
lat_ex : tuple or int, required
a (min_lat, max_lat) tuple delimiting the requested area latitudes
Returns
-------
True or False
"""
from oggm.utils import tolist
lon_ex = tolist(lon_ex, length=2)
lat_ex = tolist(lat_ex, length=2)
def _in_grid(grid_json, lon, lat):
i, j = cfg.DATA['dem_grids'][grid_json].transform(lon, lat,
maskout=True)
return np.all(~ (i.mask | j.mask))
if source == 'GIMP':
return _in_grid('gimpdem_90m_v01.1.json', lon_ex, lat_ex)
elif source == 'ARCTICDEM':
return _in_grid('arcticdem_mosaic_100m_v3.0.json', lon_ex, lat_ex)
elif source == 'RAMP':
return _in_grid('AntarcticDEM_wgs84.json', lon_ex, lat_ex)
elif source == 'REMA':
return _in_grid('REMA_100m_dem.json', lon_ex, lat_ex)
elif source == 'ALASKA':
return _in_grid('Alaska_albers_V3.json', lon_ex, lat_ex)
elif source == 'TANDEM':
return True
elif source == 'AW3D30':
return np.min(lat_ex) > -60
elif source == 'MAPZEN':
return True
elif source == 'DEM3':
return True
elif source == 'ASTER':
return True
elif source == 'SRTM':
return np.max(np.abs(lat_ex)) < 60
elif source in ['COPDEM30', 'COPDEM90']:
return True
elif source == 'NASADEM':
return (np.min(lat_ex) > -56) and (np.max(lat_ex) < 60)
elif source == 'USER':
return True
elif source is None:
return True
|
43,038 |
def sample_fock(
input_state: list,
t: float,
Ul: np.ndarray,
w: np.ndarray,
n_samples: int = 1,
loss: float = 0.0,
cutoff: int = 5,
) -> list:
r"""Generate samples for simulating vibrational quantum dynamics with a Fock input state.
**Example usage:**
>>> input_state = [0, 2]
>>> t = 10.0
>>> Ul = np.array([[0.707106781, -0.707106781],
>>> [0.707106781, 0.707106781]])
>>> w = np.array([3914.92, 3787.59])
>>> n_samples = 5
>>> sample_fock(input_state, t, Ul, w, n_samples = n_samples)
[[0, 2], [0, 2], [1, 1], [0, 2], [0, 2]]
Args:
input_state (list): input Fock state
t (float): time in femtoseconds
Ul (array): normal to local transformation matrix
w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}`
n_samples (int): number of samples to be generated
loss (float): loss parameter denoting the fraction of generated photons that are lost
cutoff (int): cutoff dimension for determining the set of number states for each mode
Returns:
list[list[int]]: a list of samples
"""
if n_samples < 1:
raise ValueError("Number of samples must be at least one")
if t < 0:
raise ValueError("Time must be zero or positive")
if np.any(w <= 0):
raise ValueError("Vibrational frequencies must be larger than zero")
if np.any(np.iscomplex(Ul)):
raise ValueError("The normal mode to local mode transformation matrix must be real")
if not 0 <= loss <= 1:
raise ValueError("Loss parameter must take a value between zero and one")
modes = len(Ul)
op = evolution(modes)
s = []
eng = sf.Engine("fock", backend_options={"cutoff_dim": cutoff})
gbs = sf.Program(modes)
# pylint: disable=expression-not-assigned
with gbs.context as q:
for i in range(modes):
sf.ops.Fock(input_state[i]) | q[i]
op(t, Ul, w) | q
if loss:
for _q in q:
sf.ops.LossChannel(1 - loss) | _q
sf.ops.MeasureFock() | q
for _ in range(n_samples):
s.append(eng.run(gbs).samples[0].tolist())
return s
|
def sample_fock(
input_state: list,
t: float,
Ul: np.ndarray,
w: np.ndarray,
n_samples: int = 1,
loss: float = 0.0,
cutoff: int = 5,
) -> list:
r"""Generate samples for simulating vibrational quantum dynamics with a Fock input state.
**Example usage:**
>>> input_state = [0, 2]
>>> t = 10.0
>>> Ul = np.array([[0.707106781, -0.707106781],
>>> [0.707106781, 0.707106781]])
>>> w = np.array([3914.92, 3787.59])
>>> n_samples = 5
>>> sample_fock(input_state, t, Ul, w, n_samples = n_samples)
[[0, 2], [0, 2], [1, 1], [0, 2], [0, 2]]
Args:
input_state (list): input Fock state
t (float): time in femtoseconds
Ul (array): normal-to-local transformation matrix
w (array): normal mode frequencies :math:`\omega` in units of :math:`\mbox{cm}^{-1}`
n_samples (int): number of samples to be generated
loss (float): loss parameter denoting the fraction of generated photons that are lost
cutoff (int): cutoff dimension for determining the set of number states for each mode
Returns:
list[list[int]]: a list of samples
"""
if n_samples < 1:
raise ValueError("Number of samples must be at least one")
if t < 0:
raise ValueError("Time must be zero or positive")
if np.any(w <= 0):
raise ValueError("Vibrational frequencies must be larger than zero")
if np.any(np.iscomplex(Ul)):
raise ValueError("The normal mode to local mode transformation matrix must be real")
if not 0 <= loss <= 1:
raise ValueError("Loss parameter must take a value between zero and one")
modes = len(Ul)
op = evolution(modes)
s = []
eng = sf.Engine("fock", backend_options={"cutoff_dim": cutoff})
gbs = sf.Program(modes)
# pylint: disable=expression-not-assigned
with gbs.context as q:
for i in range(modes):
sf.ops.Fock(input_state[i]) | q[i]
op(t, Ul, w) | q
if loss:
for _q in q:
sf.ops.LossChannel(1 - loss) | _q
sf.ops.MeasureFock() | q
for _ in range(n_samples):
s.append(eng.run(gbs).samples[0].tolist())
return s
|
57,919 |
def list_used_docker_images() -> CommandResults:
md = None
active_docker_list_integration = {}
active_docker_list_automation = {}
export_to_context = demisto.args().get('export_to_context') == 'true'
ignore_deprecated_automations = demisto.args().get('ignore_deprecated_automations') == 'true'
ignore_deprecated_integrations = demisto.args().get('ignore_deprecated_integrations') == 'true'
ignore_disabled_integrations = demisto.args().get('ignore_disabled_integrations') == 'true'
''' Examples for output: { 'demisto/python3:3.9.7.24076' : ['ListUsedDockerImage', 'VirusTotal',...]}'''
result_dict: Dict[str, List[str]] = {}
active_integration_instances = demisto.internalHttpRequest(POST_COMMAND, '/settings/integration/search',
'{\"size\":500}')
demisto.debug(f'response code = {0}', active_integration_instances['statusCode'])
if active_integration_instances and active_integration_instances['statusCode'] == 200:
active_docker_list_integration = extract_dockers_from_integration_search_result(
active_integration_instances['body'], ignore_deprecated_integrations, ignore_disabled_integrations)
active_automation = demisto.internalHttpRequest(POST_COMMAND, '/automation/search',
'{\"size\":500}')
demisto.debug(f'response code = {0}', active_automation['statusCode'])
if active_automation and active_automation['statusCode'] == 200:
active_docker_list_automation = extract_dockers_from_automation_search_result(
active_automation['body'], ignore_deprecated_automations)
result_dict = merge_result(active_docker_list_integration, result_dict, MAX_PER_DOCKER)
result_dict = merge_result(active_docker_list_automation, result_dict, MAX_PER_DOCKER)
''' format the result for Markdown view'''
result_output = []
result_output = format_result_for_markdown(result_dict)
md = tableToMarkdown('Docker Images In use:', result_output, headers=['DockerImage', 'ContentItem'],
headerTransform=pascalToSpace)
if export_to_context:
return CommandResults(
outputs_prefix='UsedDockerImages',
outputs_key_field='DockerImage',
outputs=result_output,
raw_response=result_dict,
readable_output=md)
else:
return CommandResults(readable_output=md)
|
def list_used_docker_images() -> CommandResults:
md = None
active_docker_list_integration = {}
active_docker_list_automation = {}
export_to_context = demisto.args().get('export_to_context') == 'true'
ignore_deprecated_automations = demisto.args().get('ignore_deprecated_automations') == 'true'
ignore_deprecated_integrations = demisto.args().get('ignore_deprecated_integrations') == 'true'
ignore_disabled_integrations = demisto.args().get('ignore_disabled_integrations') == 'true'
''' Examples for output: { 'demisto/python3:3.9.7.24076' : ['ListUsedDockerImage', 'VirusTotal',...]}'''
result_dict: Dict[str, List[str]] = {}
active_integration_instances = demisto.internalHttpRequest(POST_COMMAND, '/settings/integration/search',
'{\"size\":500}')
demisto.debug(f'response code = {0}', active_integration_instances['statusCode'])
if active_integration_instances and active_integration_instances['statusCode'] == 200:
active_docker_list_integration = extract_dockers_from_integration_search_result(
active_integration_instances['body'], False, True
)
active_automation = demisto.internalHttpRequest(POST_COMMAND, '/automation/search',
'{\"size\":500}')
demisto.debug(f'response code = {0}', active_automation['statusCode'])
if active_automation and active_automation['statusCode'] == 200:
active_docker_list_automation = extract_dockers_from_automation_search_result(
active_automation['body'], ignore_deprecated_automations)
result_dict = merge_result(active_docker_list_integration, result_dict, MAX_PER_DOCKER)
result_dict = merge_result(active_docker_list_automation, result_dict, MAX_PER_DOCKER)
''' format the result for Markdown view'''
result_output = []
result_output = format_result_for_markdown(result_dict)
md = tableToMarkdown('Docker Images In use:', result_output, headers=['DockerImage', 'ContentItem'],
headerTransform=pascalToSpace)
if export_to_context:
return CommandResults(
outputs_prefix='UsedDockerImages',
outputs_key_field='DockerImage',
outputs=result_output,
raw_response=result_dict,
readable_output=md)
else:
return CommandResults(readable_output=md)
|
17,721 |
def to_parmed(compound, box=None, title='', residues=None,
show_ports=False, infer_residues=False):
""" Create a Parmed Structure from a Compound.
Parameters
----------
compound : mb.Compound
mbuild Compound that need to be converted.
box : mb.Box, optional, default=compound.boundingbox (with buffer)
Box information to be used when converting to a `Structure`.
If 'None', a bounding box is used with 0.25nm buffers at
each face to avoid overlapping atoms, unless `compound.periodicity`
is not None, in which case those values are used for the
box lengths.
title : str, optional, default=compound.name
Title/name of the ParmEd Structure
residues : str of list of str
Labels of residues in the Compound. Residues are assigned by
checking against Compound.name.
show_ports : boolean, optional, default=False
Include all port atoms when converting to a `Structure`.
infer_residues : bool, optional, default=False
Attempt to assign residues based on names of children.
Return
------
parmed.structure.Structure
ParmEd Structure object converted from compound
See Also
--------
parmed.structure.Structure : Details on the ParmEd Structure object
"""
structure = pmd.Structure()
structure.title = title if title else compound.name
atom_mapping = {} # For creating bonds below
guessed_elements = set()
# Attempt to grab residue names based on names of children
if not residues and infer_residues:
residues = list(set([child.name for child in compound.children]))
if isinstance(residues, str):
residues = [residues]
if isinstance(residues, (list, set)):
residues = tuple(residues)
default_residue = pmd.Residue('RES')
port_residue = pmd.Residue('PRT')
compound_residue_map = dict()
atom_residue_map = dict()
# Loop through particles and add initialize ParmEd atoms
for atom in compound.particles(include_ports=show_ports):
if atom.port_particle:
current_residue = port_residue
atom_residue_map[atom] = current_residue
if current_residue not in structure.residues:
structure.residues.append(current_residue)
pmd_atom = pmd.Atom(atomic_number=0, name='VS',
mass=0, charge=0)
pmd_atom.xx, pmd_atom.xy, pmd_atom.xz = atom.pos * 10 # Angstroms
else:
if residues and atom.name in residues:
current_residue = pmd.Residue(atom.name)
atom_residue_map[atom] = current_residue
compound_residue_map[atom] = current_residue
elif residues:
for parent in atom.ancestors():
if residues and parent.name in residues:
if parent not in compound_residue_map:
current_residue = pmd.Residue(parent.name)
compound_residue_map[parent] = current_residue
atom_residue_map[atom] = current_residue
break
else: # Did not find specified residues in ancestors.
current_residue = default_residue
atom_residue_map[atom] = current_residue
else:
current_residue = default_residue
atom_residue_map[atom] = current_residue
if current_residue not in structure.residues:
structure.residues.append(current_residue)
atomic_number = None
name = ''.join(char for char in atom.name if not char.isdigit())
try:
atomic_number = AtomicNum[atom.name.capitalize()]
except KeyError:
element = element_by_name(atom.name.capitalize())
if name not in guessed_elements:
warn(
'Guessing that "{}" is element: "{}"'.format(
atom, element))
guessed_elements.add(name)
else:
element = atom.name.capitalize()
atomic_number = atomic_number or AtomicNum[element]
mass = Mass[element]
pmd_atom = pmd.Atom(atomic_number=atomic_number, name=atom.name,
mass=mass, charge=atom.charge)
pmd_atom.xx, pmd_atom.xy, pmd_atom.xz = atom.pos * 10 # Angstroms
residue = atom_residue_map[atom]
structure.add_atom(pmd_atom, resname=residue.name,
resnum=residue.idx)
atom_mapping[atom] = pmd_atom
# "Claim" all of the items it contains and subsequently index all of its items
structure.residues.claim()
# Create and add bonds to ParmEd Structure
for atom1, atom2 in compound.bonds():
bond = pmd.Bond(atom_mapping[atom1], atom_mapping[atom2])
structure.bonds.append(bond)
# pad box with .25nm buffers
if box is None:
box = compound.boundingbox
box_vec_max = box.maxs.tolist()
box_vec_min = box.mins.tolist()
for dim, val in enumerate(compound.periodicity):
if val:
box_vec_max[dim] = val
box_vec_min[dim] = 0.0
if not val:
box_vec_max[dim] += 0.25
box_vec_min[dim] -= 0.25
box = Box(mins=box_vec_min, maxs=box_vec_max)
box_vector = np.empty(6)
if box.angles is not None:
box_vector[3:6] = box.angles
else:
box_vector[3] = box_vector[4] = box_vector[5] = 90.0
for dim in range(3):
box_vector[dim] = box.lengths[dim] * 10
structure.box = box_vector
return structure
|
def to_parmed(compound, box=None, title='', residues=None,
show_ports=False, infer_residues=False):
""" Create a Parmed Structure from a Compound.
Parameters
----------
compound : mb.Compound
mbuild Compound that need to be converted.
box : mb.Box, optional, default=compound.boundingbox (with buffer)
Box information to be used when converting to a `Structure`.
If 'None', a bounding box is used with 0.25nm buffers at
each face to avoid overlapping atoms, unless `compound.periodicity`
is not None, in which case those values are used for the
box lengths.
title : str, optional, default=compound.name
Title/name of the ParmEd Structure
residues : str of list of str
Labels of residues in the Compound. Residues are assigned by
checking against Compound.name.
show_ports : boolean, optional, default=False
Include all port atoms when converting to a `Structure`.
infer_residues : bool, optional, default=False
Attempt to assign residues based on names of children.
Returns
------
parmed.structure.Structure
ParmEd Structure object converted from compound
See Also
--------
parmed.structure.Structure : Details on the ParmEd Structure object
"""
structure = pmd.Structure()
structure.title = title if title else compound.name
atom_mapping = {} # For creating bonds below
guessed_elements = set()
# Attempt to grab residue names based on names of children
if not residues and infer_residues:
residues = list(set([child.name for child in compound.children]))
if isinstance(residues, str):
residues = [residues]
if isinstance(residues, (list, set)):
residues = tuple(residues)
default_residue = pmd.Residue('RES')
port_residue = pmd.Residue('PRT')
compound_residue_map = dict()
atom_residue_map = dict()
# Loop through particles and add initialize ParmEd atoms
for atom in compound.particles(include_ports=show_ports):
if atom.port_particle:
current_residue = port_residue
atom_residue_map[atom] = current_residue
if current_residue not in structure.residues:
structure.residues.append(current_residue)
pmd_atom = pmd.Atom(atomic_number=0, name='VS',
mass=0, charge=0)
pmd_atom.xx, pmd_atom.xy, pmd_atom.xz = atom.pos * 10 # Angstroms
else:
if residues and atom.name in residues:
current_residue = pmd.Residue(atom.name)
atom_residue_map[atom] = current_residue
compound_residue_map[atom] = current_residue
elif residues:
for parent in atom.ancestors():
if residues and parent.name in residues:
if parent not in compound_residue_map:
current_residue = pmd.Residue(parent.name)
compound_residue_map[parent] = current_residue
atom_residue_map[atom] = current_residue
break
else: # Did not find specified residues in ancestors.
current_residue = default_residue
atom_residue_map[atom] = current_residue
else:
current_residue = default_residue
atom_residue_map[atom] = current_residue
if current_residue not in structure.residues:
structure.residues.append(current_residue)
atomic_number = None
name = ''.join(char for char in atom.name if not char.isdigit())
try:
atomic_number = AtomicNum[atom.name.capitalize()]
except KeyError:
element = element_by_name(atom.name.capitalize())
if name not in guessed_elements:
warn(
'Guessing that "{}" is element: "{}"'.format(
atom, element))
guessed_elements.add(name)
else:
element = atom.name.capitalize()
atomic_number = atomic_number or AtomicNum[element]
mass = Mass[element]
pmd_atom = pmd.Atom(atomic_number=atomic_number, name=atom.name,
mass=mass, charge=atom.charge)
pmd_atom.xx, pmd_atom.xy, pmd_atom.xz = atom.pos * 10 # Angstroms
residue = atom_residue_map[atom]
structure.add_atom(pmd_atom, resname=residue.name,
resnum=residue.idx)
atom_mapping[atom] = pmd_atom
# "Claim" all of the items it contains and subsequently index all of its items
structure.residues.claim()
# Create and add bonds to ParmEd Structure
for atom1, atom2 in compound.bonds():
bond = pmd.Bond(atom_mapping[atom1], atom_mapping[atom2])
structure.bonds.append(bond)
# pad box with .25nm buffers
if box is None:
box = compound.boundingbox
box_vec_max = box.maxs.tolist()
box_vec_min = box.mins.tolist()
for dim, val in enumerate(compound.periodicity):
if val:
box_vec_max[dim] = val
box_vec_min[dim] = 0.0
if not val:
box_vec_max[dim] += 0.25
box_vec_min[dim] -= 0.25
box = Box(mins=box_vec_min, maxs=box_vec_max)
box_vector = np.empty(6)
if box.angles is not None:
box_vector[3:6] = box.angles
else:
box_vector[3] = box_vector[4] = box_vector[5] = 90.0
for dim in range(3):
box_vector[dim] = box.lengths[dim] * 10
structure.box = box_vector
return structure
|
35,647 |
def resnext50_32x4d(weights: Optional[ResNeXt50_32x4dWeights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = ResNeXt50_32x4dWeights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = ResNeXt50_32x4dWeights.verify(weights)
kwargs["groups"] = 32
kwargs["width_per_group"] = 4
return _resnet(BasicBlock, [3, 4, 6, 3], weights, progress, **kwargs)
|
def resnext50_32x4d(weights: Optional[ResNeXt50_32x4dWeights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = ResNeXt50_32x4dWeights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = ResNeXt50_32x4dWeights.verify(weights)
kwargs["groups"] = 32
kwargs["width_per_group"] = 4
return _resnet(Bottleneck, [3, 4, 6, 3], weights, progress, **kwargs)
|
33,510 |
def get_stage_variables(context: ApiInvocationContext) -> Optional[Dict[str, str]]:
if is_test_invoke_method(context.method, context.path):
return None
if not context.stage:
return {}
region_name = [
name
for name, region in apigateway_models.apigateway_backends.items()
if context.api_id in region.apis
][0]
api_gateway_client = aws_stack.connect_to_service("apigateway", region_name=region_name)
try:
response = api_gateway_client.get_stage(restApiId=context.api_id, stageName=context.stage)
return response.get("variables")
except Exception:
LOG.info(f"Failed to get stage {context.stage} for api id " f"{context.api_id}")
return {}
|
def get_stage_variables(context: ApiInvocationContext) -> Optional[Dict[str, str]]:
if is_test_invoke_method(context.method, context.path):
return None
if not context.stage:
return {}
region_name = [
name
for name, region in apigateway_models.apigateway_backends.items()
if context.api_id in region.apis
][0]
api_gateway_client = aws_stack.connect_to_service("apigateway", region_name=region_name)
try:
response = api_gateway_client.get_stage(restApiId=context.api_id, stageName=context.stage)
return response.get("variables")
except Exception:
LOG.info("Failed to get stage %s for API id %s", context.stage, context.api_id)
return {}
|
3,867 |
def is_perfect_matching(G, matching):
"""Return True is ``matching`` is a perfect matching for ``G``
A *perfect matching* in a graph is a matching in which exactly one edge
is incident upon each vertex.
Parameters
----------
G : NetworkX graph
matching : dict or set
A dictionary or set representing a matching. If a dictionary, it
must have ``matching[u] == v`` and ``matching[v] == u`` for each
edge ``(u, v)`` in the matching. If a set, it must have elements
of the form ``(u, v)``, where ``(u, v)`` is an edge in the
matching.
Returns
-------
bool
Whether the given set or dictionary represents a valid perfect
matching in the graph.
"""
if isinstance(matching, dict):
matching = matching_dict_to_set(matching)
if not is_matching(G, matching):
return False
counts = Counter(sum(matching, ()))
return all(counts[v] == 1 for v in G)
|
def is_perfect_matching(G, matching):
"""Return True if ``matching`` is a perfect matching for ``G``
A *perfect matching* in a graph is a matching in which exactly one edge
is incident upon each vertex.
Parameters
----------
G : NetworkX graph
matching : dict or set
A dictionary or set representing a matching. If a dictionary, it
must have ``matching[u] == v`` and ``matching[v] == u`` for each
edge ``(u, v)`` in the matching. If a set, it must have elements
of the form ``(u, v)``, where ``(u, v)`` is an edge in the
matching.
Returns
-------
bool
Whether the given set or dictionary represents a valid perfect
matching in the graph.
"""
if isinstance(matching, dict):
matching = matching_dict_to_set(matching)
if not is_matching(G, matching):
return False
counts = Counter(sum(matching, ()))
return all(counts[v] == 1 for v in G)
|
36,278 |
def measure_observables(qc: QuantumComputer, tomo_experiment: TomographyExperiment,
n_shots: int = 10000,
progress_callback: Optional[Callable[[int, int], None]] = None,
active_reset = False,
symmetrize_readout: Optional[str] = 'exhaustive',
calibrate_readout: Optional[str] = 'plus-eig',
readout_symmetrize: Optional[str] = None,
show_progress_bar: bool = False):
"""
Measure all the observables in a TomographyExperiment.
:param qc: A QuantumComputer which can run quantum programs
:param tomo_experiment: A suite of tomographic observables to measure
:param n_shots: The number of shots to take per ExperimentSetting
:param progress_callback: If not None, this function is called each time a group of
settings is run with arguments ``f(i, len(tomo_experiment)`` such that the progress
is ``i / len(tomo_experiment)``.
:param active_reset: Whether to actively reset qubits instead of waiting several
times the coherence length for qubits to decay to |0> naturally. Setting this
to True is much faster but there is a ~1% error per qubit in the reset operation.
Thermal noise from "traditional" reset is not routinely characterized but is of the same
order.
:param symmetrize_readout: Method used to symmetrize the readout errors, i.e. set
p(0|1) = p(1|0). For uncorrelated readout errors, this can be achieved by randomly
selecting between the POVMs {X.D1.X, X.D0.X} and {D0, D1} (where both D0 and D1 are
diagonal). However, here we currently support exhaustive symmetrization and loop through
all possible 2^n POVMs {X/I . POVM . X/I}^n, and obtain symmetrization more generally,
i.e. set p(00|00) = p(01|01) = .. = p(11|11), as well as p(00|01) = p(01|00) etc. If this
is None, no symmetrization is performed. The exhaustive method can be specified by setting
this variable to 'exhaustive' (default value). Set to `None` if no symmetrization is
desired.
:param calibrate_readout: Method used to calibrate the readout results. Currently, the only
method supported is normalizing against the operator's expectation value in its +1
eigenstate, which can be specified by setting this variable to 'plus-eig' (default value).
The preceding symmetrization and this step together yield a more accurate estimation of the observable. Set to `None` if no calibration is desired.
:param show_progress_bar: displays a progress bar via tqdm if true.
"""
if readout_symmetrize is not None:
warnings.warn("'readout_symmetrize' has been renamed to 'symmetrize_readout'",
DeprecationWarning)
symmetrize_readout = readout_symmetrize
# calibration readout only works with symmetrization turned on
if calibrate_readout is not None and symmetrize_readout is None:
raise ValueError("Readout calibration only works with readout symmetrization turned on")
# generate programs for each group of simultaneous settings.
programs, meas_qubits = generate_experiment_programs(tomo_experiment, active_reset)
# Outer loop over a collection of grouped settings for which we can simultaneously
# estimate.
for i, (prog, qubits, settings) in enumerate(zip(tqdm(programs, disable=not show_progress_bar),
meas_qubits, tomo_experiment)):
if symmetrize_readout == 'exhaustive' and len(qubits) > 0:
bitstrings, d_qub_idx = _exhaustive_symmetrization(qc, qubits, n_shots, prog)
elif symmetrize_readout is None and len(qubits) > 0:
total_prog_no_symm = prog.copy()
ro = total_prog_no_symm.declare('ro', 'BIT', len(qubits))
d_qub_idx = {}
for j, q in enumerate(qubits):
total_prog_no_symm += MEASURE(q, ro[j])
# Keep track of qubit-classical register mapping via dict
d_qub_idx[q] = j
total_prog_no_symm.wrap_in_numshots_loop(n_shots)
total_prog_no_symm_native = qc.compiler.quil_to_native_quil(total_prog_no_symm)
total_prog_no_symm_bin = qc.compiler.native_quil_to_executable(total_prog_no_symm_native)
bitstrings = qc.run(total_prog_no_symm_bin)
elif len(qubits) == 0:
# looks like an identity operation
pass
else:
raise ValueError("Readout symmetrization method must be either 'exhaustive' or None")
if progress_callback is not None:
progress_callback(i, len(tomo_experiment))
# 3. Post-process
# Inner loop over the grouped settings. They only differ in which qubits' measurements
# we include in the post-processing. For example, if `settings` is Z1, Z2, Z1Z2 and we
# measure (n_shots, n_qubits=2) obs_strings then the full operator value involves selecting
# either the first column, second column, or both and multiplying along the row.
for setting in settings:
# 3.1 Get the term's coefficient so we can multiply it in later.
coeff = complex(setting.out_operator.coefficient)
if not np.isclose(coeff.imag, 0):
raise ValueError(f"{setting}'s out_operator has a complex coefficient.")
coeff = coeff.real
# 3.2 Special case for measuring the "identity" operator, which doesn't make much
# sense but should happen perfectly.
if is_identity(setting.out_operator):
yield ExperimentResult(
setting=setting,
expectation=coeff,
std_err=0.0,
total_counts=n_shots,
)
continue
# 3.3 Obtain statistics from result of experiment
obs_mean, obs_var = _stats_from_measurements(bitstrings, d_qub_idx, setting, n_shots, coeff)
if calibrate_readout == 'plus-eig':
# 4 Readout calibration
# 4.1 Obtain calibration program
calibr_prog = _calibration_program(qc, tomo_experiment, setting)
# 4.2 Perform symmetrization on the calibration program
if symmetrize_readout == 'exhaustive':
qubs_calibr = setting.out_operator.get_qubits()
calibr_shots = n_shots
calibr_results, d_calibr_qub_idx = _exhaustive_symmetrization(qc, qubs_calibr, calibr_shots, calibr_prog)
else:
raise ValueError("Readout symmetrization method must be either 'exhaustive' or None")
# 4.3 Obtain statistics from the measurement process
obs_calibr_mean, obs_calibr_var = _stats_from_measurements(calibr_results, d_calibr_qub_idx, setting, calibr_shots)
# 4.3 Calibrate the readout results
corrected_mean = obs_mean / obs_calibr_mean
corrected_var = ratio_variance(obs_mean, obs_var, obs_calibr_mean, obs_calibr_var)
yield ExperimentResult(
setting=setting,
expectation=corrected_mean.item(),
std_err=np.sqrt(corrected_var).item(),
total_counts=n_shots,
raw_expectation=obs_mean.item(),
raw_std_err=np.sqrt(obs_var).item(),
calibration_expectation=obs_calibr_mean.item(),
calibration_std_err=np.sqrt(obs_calibr_var).item(),
calibration_counts=calibr_shots,
)
elif calibrate_readout is None:
# No calibration
yield ExperimentResult(
setting=setting,
expectation=obs_mean.item(),
std_err=np.sqrt(obs_var).item(),
total_counts=n_shots,
)
else:
raise ValueError("Calibration readout method must be either 'plus-eig' or None")
|
def measure_observables(qc: QuantumComputer, tomo_experiment: TomographyExperiment,
n_shots: int = 10000,
progress_callback: Optional[Callable[[int, int], None]] = None,
active_reset: bool = False,
symmetrize_readout: Optional[str] = 'exhaustive',
calibrate_readout: Optional[str] = 'plus-eig',
readout_symmetrize: Optional[str] = None,
show_progress_bar: bool = False):
"""
Measure all the observables in a TomographyExperiment.
:param qc: A QuantumComputer which can run quantum programs
:param tomo_experiment: A suite of tomographic observables to measure
:param n_shots: The number of shots to take per ExperimentSetting
:param progress_callback: If not None, this function is called each time a group of
settings is run with arguments ``f(i, len(tomo_experiment)`` such that the progress
is ``i / len(tomo_experiment)``.
:param active_reset: Whether to actively reset qubits instead of waiting several
times the coherence length for qubits to decay to |0> naturally. Setting this
to True is much faster but there is a ~1% error per qubit in the reset operation.
Thermal noise from "traditional" reset is not routinely characterized but is of the same
order.
:param symmetrize_readout: Method used to symmetrize the readout errors, i.e. set
p(0|1) = p(1|0). For uncorrelated readout errors, this can be achieved by randomly
selecting between the POVMs {X.D1.X, X.D0.X} and {D0, D1} (where both D0 and D1 are
diagonal). However, here we currently support exhaustive symmetrization and loop through
all possible 2^n POVMs {X/I . POVM . X/I}^n, and obtain symmetrization more generally,
i.e. set p(00|00) = p(01|01) = .. = p(11|11), as well as p(00|01) = p(01|00) etc. If this
is None, no symmetrization is performed. The exhaustive method can be specified by setting
this variable to 'exhaustive' (default value). Set to `None` if no symmetrization is
desired.
:param calibrate_readout: Method used to calibrate the readout results. Currently, the only
method supported is normalizing against the operator's expectation value in its +1
eigenstate, which can be specified by setting this variable to 'plus-eig' (default value).
The preceding symmetrization and this step together yield a more accurate estimation of the observable. Set to `None` if no calibration is desired.
:param show_progress_bar: displays a progress bar via tqdm if true.
"""
if readout_symmetrize is not None:
warnings.warn("'readout_symmetrize' has been renamed to 'symmetrize_readout'",
DeprecationWarning)
symmetrize_readout = readout_symmetrize
# calibration readout only works with symmetrization turned on
if calibrate_readout is not None and symmetrize_readout is None:
raise ValueError("Readout calibration only works with readout symmetrization turned on")
# generate programs for each group of simultaneous settings.
programs, meas_qubits = generate_experiment_programs(tomo_experiment, active_reset)
# Outer loop over a collection of grouped settings for which we can simultaneously
# estimate.
for i, (prog, qubits, settings) in enumerate(zip(tqdm(programs, disable=not show_progress_bar),
meas_qubits, tomo_experiment)):
if symmetrize_readout == 'exhaustive' and len(qubits) > 0:
bitstrings, d_qub_idx = _exhaustive_symmetrization(qc, qubits, n_shots, prog)
elif symmetrize_readout is None and len(qubits) > 0:
total_prog_no_symm = prog.copy()
ro = total_prog_no_symm.declare('ro', 'BIT', len(qubits))
d_qub_idx = {}
for j, q in enumerate(qubits):
total_prog_no_symm += MEASURE(q, ro[j])
# Keep track of qubit-classical register mapping via dict
d_qub_idx[q] = j
total_prog_no_symm.wrap_in_numshots_loop(n_shots)
total_prog_no_symm_native = qc.compiler.quil_to_native_quil(total_prog_no_symm)
total_prog_no_symm_bin = qc.compiler.native_quil_to_executable(total_prog_no_symm_native)
bitstrings = qc.run(total_prog_no_symm_bin)
elif len(qubits) == 0:
# looks like an identity operation
pass
else:
raise ValueError("Readout symmetrization method must be either 'exhaustive' or None")
if progress_callback is not None:
progress_callback(i, len(tomo_experiment))
# 3. Post-process
# Inner loop over the grouped settings. They only differ in which qubits' measurements
# we include in the post-processing. For example, if `settings` is Z1, Z2, Z1Z2 and we
# measure (n_shots, n_qubits=2) obs_strings then the full operator value involves selecting
# either the first column, second column, or both and multiplying along the row.
for setting in settings:
# 3.1 Get the term's coefficient so we can multiply it in later.
coeff = complex(setting.out_operator.coefficient)
if not np.isclose(coeff.imag, 0):
raise ValueError(f"{setting}'s out_operator has a complex coefficient.")
coeff = coeff.real
# 3.2 Special case for measuring the "identity" operator, which doesn't make much
# sense but should happen perfectly.
if is_identity(setting.out_operator):
yield ExperimentResult(
setting=setting,
expectation=coeff,
std_err=0.0,
total_counts=n_shots,
)
continue
# 3.3 Obtain statistics from result of experiment
obs_mean, obs_var = _stats_from_measurements(bitstrings, d_qub_idx, setting, n_shots, coeff)
if calibrate_readout == 'plus-eig':
# 4 Readout calibration
# 4.1 Obtain calibration program
calibr_prog = _calibration_program(qc, tomo_experiment, setting)
# 4.2 Perform symmetrization on the calibration program
if symmetrize_readout == 'exhaustive':
qubs_calibr = setting.out_operator.get_qubits()
calibr_shots = n_shots
calibr_results, d_calibr_qub_idx = _exhaustive_symmetrization(qc, qubs_calibr, calibr_shots, calibr_prog)
else:
raise ValueError("Readout symmetrization method must be either 'exhaustive' or None")
# 4.3 Obtain statistics from the measurement process
obs_calibr_mean, obs_calibr_var = _stats_from_measurements(calibr_results, d_calibr_qub_idx, setting, calibr_shots)
# 4.3 Calibrate the readout results
corrected_mean = obs_mean / obs_calibr_mean
corrected_var = ratio_variance(obs_mean, obs_var, obs_calibr_mean, obs_calibr_var)
yield ExperimentResult(
setting=setting,
expectation=corrected_mean.item(),
std_err=np.sqrt(corrected_var).item(),
total_counts=n_shots,
raw_expectation=obs_mean.item(),
raw_std_err=np.sqrt(obs_var).item(),
calibration_expectation=obs_calibr_mean.item(),
calibration_std_err=np.sqrt(obs_calibr_var).item(),
calibration_counts=calibr_shots,
)
elif calibrate_readout is None:
# No calibration
yield ExperimentResult(
setting=setting,
expectation=obs_mean.item(),
std_err=np.sqrt(obs_var).item(),
total_counts=n_shots,
)
else:
raise ValueError("Calibration readout method must be either 'plus-eig' or None")
|
55,392 |
def _build_image(image_name, entrypoint, mlflow_home=None, custom_setup_steps_hook=None):
"""
Build an MLflow Docker image that can be used to serve a
The image is built locally and it requires Docker to run.
:param image_name: Docker image name.
:param entry_point: String containing ENTRYPOINT directive for docker image
:param mlflow_home: (Optional) Path to a local copy of the MLflow GitHub repository.
If specified, the image will install MLflow from this directory.
If None, it will install MLflow from pip.
:param custom_setup_steps_hook: (Optional) Single-argument function that takes the string path
of a dockerfile context directory and returns a string containing Dockerfile commands to
run during the image build step.
"""
mlflow_home = os.path.abspath(mlflow_home) if mlflow_home else None
with TempDir() as tmp:
cwd = tmp.path()
install_mlflow = _get_mlflow_install_step(cwd, mlflow_home)
custom_setup_steps = custom_setup_steps_hook(cwd) if custom_setup_steps_hook else ""
with open(os.path.join(cwd, "Dockerfile"), "w") as f:
f.write(
_DOCKERFILE_TEMPLATE.format(
install_mlflow=install_mlflow,
custom_setup_steps=custom_setup_steps,
entrypoint=entrypoint,
)
)
_logger.info("Building docker image with name %s", image_name)
os.system("find {cwd}/".format(cwd=cwd))
proc = Popen(
["docker", "build", "-t", image_name, "-f", "Dockerfile", ".", "--platform=linux/amd64"],
cwd=cwd,
stdout=PIPE,
stderr=STDOUT,
universal_newlines=True,
)
for x in iter(proc.stdout.readline, ""):
eprint(x, end="")
if proc.wait():
raise RuntimeError("Docker build failed.")
|
def _build_image(image_name, entrypoint, mlflow_home=None, custom_setup_steps_hook=None):
"""
Build an MLflow Docker image that can be used to serve a
The image is built locally and it requires Docker to run.
:param image_name: Docker image name.
:param entry_point: String containing ENTRYPOINT directive for docker image
:param mlflow_home: (Optional) Path to a local copy of the MLflow GitHub repository.
If specified, the image will install MLflow from this directory.
If None, it will install MLflow from pip.
:param custom_setup_steps_hook: (Optional) Single-argument function that takes the string path
of a dockerfile context directory and returns a string containing Dockerfile commands to
run during the image build step.
"""
mlflow_home = os.path.abspath(mlflow_home) if mlflow_home else None
with TempDir() as tmp:
cwd = tmp.path()
install_mlflow = _get_mlflow_install_step(cwd, mlflow_home)
custom_setup_steps = custom_setup_steps_hook(cwd) if custom_setup_steps_hook else ""
with open(os.path.join(cwd, "Dockerfile"), "w") as f:
f.write(
_DOCKERFILE_TEMPLATE.format(
install_mlflow=install_mlflow,
custom_setup_steps=custom_setup_steps,
entrypoint=entrypoint,
)
)
_logger.info("Building docker image with name %s", image_name)
os.system("find {cwd}/".format(cwd=cwd))
proc = Popen(
[
"docker",
"build",
"-t",
image_name,
"-f",
"Dockerfile",
".",
# <Insert comment on why we need this>
"--platform=linux/amd64",
],
cwd=cwd,
stdout=PIPE,
stderr=STDOUT,
universal_newlines=True,
)
for x in iter(proc.stdout.readline, ""):
eprint(x, end="")
if proc.wait():
raise RuntimeError("Docker build failed.")
|
45,175 |
def Cancelled(cls: Type[State] = State, **kwargs) -> State:
"""Convenience function for creating `Cancelled` states.
Returns:
State: a Cancelled state
"""
return schemas.states.Cancelled(cls=cls, **kwargs)
|
def Cancelled(cls: Type[State] = State, **kwargs) -> State:
"""Convenience function for creating `Cancelled` states.
Returns:
State: a `Cancelled` state
"""
return schemas.states.Cancelled(cls=cls, **kwargs)
|
36,087 |
def _get_filter(only_prefix: Iterable[str], ignore_prefix: Iterable[str]) -> Callable[[str], bool]:
"""Create filter for members to extract."""
if only_prefix:
def _filter(name):
return any(name.startswith(prefix) for prefix in only_prefix
) and all(not name.startswith(prefix) for prefix in ignore_prefix)
else:
def _filter(name):
return all(not name.startswith(prefix) for prefix in ignore_prefix)
return _filter
|
def _get_filter(only_prefix: Iterable[str], ignore_prefix: Iterable[str]) -> Callable[[str], bool]:
"""Create name filter."""
if only_prefix:
def _filter(name):
return any(name.startswith(prefix) for prefix in only_prefix
) and all(not name.startswith(prefix) for prefix in ignore_prefix)
else:
def _filter(name):
return all(not name.startswith(prefix) for prefix in ignore_prefix)
return _filter
|
43,823 |
def quantum_monte_carlo(fn, wires, target_wire, estimation_wires):
r"""Provides the circuit to perform the
`quantum Monte Carlo estimation <https://arxiv.org/abs/1805.00109>`__ algorithm.
The input ``fn`` should be the quantum circuit corresponding to the :math:`\mathcal{F}` unitary
in the paper above that encodes the probability distribution and random variable onto ``wires``
so that measurement of the ``target_wire`` provides the expectation value to be estimated.
The quantum Monte Carlo algorithm then estimates the expectation value using quantum phase
estimation (check out :class:`~.QuantumPhaseEstimation` for more details), using the
``estimation_wires``.
.. note::
A complementary approach for quantum Monte Carlo is available with the
:class:`~.QuantumMonteCarlo` template.
The ``quantum_monte_carlo`` transform is intended for
use when you already have the circuit for performing :math:`\mathcal{F}` set up, and is
compatible with resource estimation and potential hardware implementation. The
:class:`~.QuantumMonteCarlo` template is unitary-based and is only compatible with
simulators, but may perform faster and is suited to quick prototyping.
Args:
fn (Callable): a quantum function that applies quantum operations according to the
:math:`\mathcal{F}` unitary used as part of quantum Monte Carlo estimation
wires (Union[Wires or Sequence[int]]): the wires acted upon by the ``fn`` circuit
target_wire (Union[Wires, int]): The wire in which the expectation value is encoded. Must be
contained within ``wires``.
estimation_wires (Union[Wires, Sequence[int], or int]): the wires used for phase estimation
Returns:
function: The circuit for quantum Monte Carlo estimation
Raises:
ValueError: if ``wires`` and ``estimation_wires`` share a common wire
.. UsageDetails::
Consider an input quantum circuit ``fn`` that performs the unitary
.. math::
\mathcal{F} = \mathcal{R} \mathcal{A}.
.. figure:: ../../_static/ops/f.svg
:align: center
:width: 15%
:target: javascript:void(0);
Here, the unitary :math:`\mathcal{A}` prepares a probability distribution :math:`p(i)` of
dimension :math:`M = 2^{m}` over :math:`m \geq 1` qubits:
.. math::
\mathcal{A}|0\rangle^{\otimes m} = \sum_{i \in X} p(i) |i\rangle
where :math:`X = \{0, 1, \ldots, M - 1\}` and :math:`|i\rangle` is the basis state
corresponding to :math:`i`. The :math:`\mathcal{R}` unitary imprints the
result of a function :math:`f: X \rightarrow [0, 1]` onto an ancilla qubit:
.. math::
\mathcal{R}|i\rangle |0\rangle = |i\rangle \left(\sqrt{1 - f(i)} |0\rangle + \sqrt{f(i)}|1\rangle\right).
Following `this <https://arxiv.org/abs/1805.00109>`__ paper,
it can be seen that the probability of measuring the state :math:`|1\rangle` in the final
qubit is
.. math::
\mu = \sum_{i \in X} p(i) f(i).
However, it is possible to measure :math:`\mu` more efficiently using quantum Monte Carlo
estimation. This function transforms an input quantum circuit ``fn`` that performs the
unitary :math:`\mathcal{F}` to a larger circuit for measuring :math:`\mu` using the quantum
Monte Carlo algorithm.
.. figure:: ../../_static/ops/qmc.svg
:align: center
:width: 60%
:target: javascript:void(0);
The algorithm proceeds as follows:
#. The probability distribution :math:`p(i)` is encoded using a unitary :math:`\mathcal{A}`
applied to the first :math:`m` qubits specified by ``wires``.
#. The function :math:`f(i)` is encoded onto the ``target_wire`` using a unitary
:math:`\mathcal{R}`.
#. The unitary :math:`\mathcal{Q}` is defined with eigenvalues
:math:`e^{\pm 2 \pi i \theta}` such that the phase :math:`\theta` encodes the expectation
value through the equation :math:`\mu = (1 + \cos (\pi \theta)) / 2`. The circuit in
steps 1 and 2 prepares an equal superposition over the two states corresponding to the
eigenvalues :math:`e^{\pm 2 \pi i \theta}`.
#. The circuit returned by this function is applied so that :math:`\pm\theta` can be
estimated by finding the probabilities of the :math:`n` estimation wires. This in turn
allows for the estimation of :math:`\mu`.
Visit `Rebentrost et al. (2018)
<https://arxiv.org/abs/1805.00109>`__ for further details.
In this algorithm, the number of applications :math:`N` of the :math:`\mathcal{Q}` unitary
scales as :math:`2^{n}`. However, due to the use of quantum phase estimation, the error
:math:`\epsilon` scales as :math:`\mathcal{O}(2^{-n})`. Hence,
.. math::
N = \mathcal{O}\left(\frac{1}{\epsilon}\right).
This scaling can be compared to standard Monte Carlo estimation, where :math:`N` samples are
generated from the probability distribution and the average over :math:`f` is taken. In that
case,
.. math::
N = \mathcal{O}\left(\frac{1}{\epsilon^{2}}\right).
Hence, the quantum Monte Carlo algorithm has a quadratically improved time complexity with
:math:`N`.
**Example**
Consider a standard normal distribution :math:`p(x)` and a function
:math:`f(x) = \sin ^{2} (x)`. The expectation value of :math:`f(x)` is
:math:`\int_{-\infty}^{\infty}f(x)p(x) \approx 0.432332`. This number can be approximated by
discretizing the problem and using the quantum Monte Carlo algorithm.
First, the problem is discretized:
.. code-block:: python
from scipy.stats import norm
m = 5
M = 2 ** m
xmax = np.pi # bound to region [-pi, pi]
xs = np.linspace(-xmax, xmax, M)
probs = np.array([norm().pdf(x) for x in xs])
probs /= np.sum(probs)
func = lambda i: np.sin(xs[i]) ** 2
r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])
The ``quantum_monte_carlo`` transform can then be used:
.. code-block::
from pennylane.templates.state_preparations.mottonen import (
_uniform_rotation_dagger as r_unitary,
)
n = 6
N = 2 ** n
a_wires = range(m)
wires = range(m + 1)
target_wire = m
estimation_wires = range(m + 1, n + m + 1)
dev = qml.device("default.qubit", wires=(n + m + 1))
def fn():
qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=a_wires)
r_unitary(qml.RY, r_rotations, control_wires=a_wires[::-1], target_wire=target_wire)
@qml.qnode(dev)
def qmc():
qml.quantum_monte_carlo(fn, wires, target_wire, estimation_wires)()
return qml.probs(estimation_wires)
phase_estimated = np.argmax(qmc()[:int(N / 2)]) / N
The estimated value can be retrieved using the formula :math:`\mu = (1-\cos(\pi \theta))/2`
>>> (1 - np.cos(np.pi * phase_estimated)) / 2
0.42663476277231915
It is also possible to explore the resources required to perform the quantum Monte Carlo
algorithm
>>> qtape = qmc.qtape.expand(depth=1)
>>> qtape.get_resources()
{'RY': 14674,
'CNOT': 15686,
'PhaseShift': 1020,
'RX': 510,
'CZ': 126,
'PauliX': 1260,
'Toffoli': 2016,
'SWAP': 3,
'Hadamard': 6,
'ControlledPhaseShift': 15}
"""
wires = Wires(wires)
target_wire = Wires(target_wire)
estimation_wires = Wires(estimation_wires)
if Wires.shared_wires([wires, estimation_wires]):
raise ValueError("No wires can be shared between the wires and estimation_wires registers")
@wraps(fn)
def wrapper(*args, **kwargs):
fn(*args, **kwargs)
for i, control_wire in enumerate(estimation_wires):
Hadamard(control_wire)
# Find wires eligible to be used as helper wires
work_wires = estimation_wires.toset() - {control_wire}
n_reps = 2 ** (len(estimation_wires) - (i + 1))
q = apply_controlled_Q(
fn,
wires=wires,
target_wire=target_wire,
control_wire=control_wire,
work_wires=work_wires,
)
for _ in range(n_reps):
q(*args, **kwargs)
QFT(wires=estimation_wires).inv()
return wrapper
|
def quantum_monte_carlo(fn, wires, target_wire, estimation_wires):
r"""Provides the circuit to perform the
`quantum Monte Carlo estimation <https://arxiv.org/abs/1805.00109>`__ algorithm.
The input ``fn`` should be the quantum circuit corresponding to the :math:`\mathcal{F}` unitary
in the paper above that encodes the probability distribution and random variable onto ``wires``
so that measurement of the ``target_wire`` provides the expectation value to be estimated.
The quantum Monte Carlo algorithm then estimates the expectation value using quantum phase
estimation (check out :class:`~.QuantumPhaseEstimation` for more details), using the
``estimation_wires``.
.. note::
A complementary approach for quantum Monte Carlo is available with the
:class:`~.QuantumMonteCarlo` template.
The ``quantum_monte_carlo`` transform is intended for
use when you already have the circuit for performing :math:`\mathcal{F}` set up, and is
compatible with resource estimation and potential hardware implementation. The
:class:`~.QuantumMonteCarlo` template is unitary-based and is only compatible with
simulators, but may perform faster and is suited to quick prototyping.
Args:
fn (Callable): a quantum function that applies quantum operations according to the
:math:`\mathcal{F}` unitary used as part of quantum Monte Carlo estimation
wires (Union[Wires or Sequence[int]]): the wires acted upon by the ``fn`` circuit
target_wire (Union[Wires, int]): The wire in which the expectation value is encoded. Must be
contained within ``wires``.
estimation_wires (Union[Wires, Sequence[int], or int]): the wires used for phase estimation
Returns:
function: The circuit for quantum Monte Carlo estimation
Raises:
ValueError: if ``wires`` and ``estimation_wires`` share a common wire
.. UsageDetails::
Consider an input quantum circuit ``fn`` that performs the unitary
.. math::
\mathcal{F} = \mathcal{R} \mathcal{A}.
.. figure:: ../../_static/ops/f.svg
:align: center
:width: 15%
:target: javascript:void(0);
Here, the unitary :math:`\mathcal{A}` prepares a probability distribution :math:`p(i)` of
dimension :math:`M = 2^{m}` over :math:`m \geq 1` qubits:
.. math::
\mathcal{A}|0\rangle^{\otimes m} = \sum_{i \in X} p(i) |i\rangle,
where :math:`X = \{0, 1, \ldots, M - 1\}` and :math:`|i\rangle` is the basis state
corresponding to :math:`i`. The :math:`\mathcal{R}` unitary imprints the
result of a function :math:`f: X \rightarrow [0, 1]` onto an ancilla qubit:
.. math::
\mathcal{R}|i\rangle |0\rangle = |i\rangle \left(\sqrt{1 - f(i)} |0\rangle + \sqrt{f(i)}|1\rangle\right).
Following `this <https://arxiv.org/abs/1805.00109>`__ paper,
it can be seen that the probability of measuring the state :math:`|1\rangle` in the final
qubit is
.. math::
\mu = \sum_{i \in X} p(i) f(i).
However, it is possible to measure :math:`\mu` more efficiently using quantum Monte Carlo
estimation. This function transforms an input quantum circuit ``fn`` that performs the
unitary :math:`\mathcal{F}` to a larger circuit for measuring :math:`\mu` using the quantum
Monte Carlo algorithm.
.. figure:: ../../_static/ops/qmc.svg
:align: center
:width: 60%
:target: javascript:void(0);
The algorithm proceeds as follows:
#. The probability distribution :math:`p(i)` is encoded using a unitary :math:`\mathcal{A}`
applied to the first :math:`m` qubits specified by ``wires``.
#. The function :math:`f(i)` is encoded onto the ``target_wire`` using a unitary
:math:`\mathcal{R}`.
#. The unitary :math:`\mathcal{Q}` is defined with eigenvalues
:math:`e^{\pm 2 \pi i \theta}` such that the phase :math:`\theta` encodes the expectation
value through the equation :math:`\mu = (1 + \cos (\pi \theta)) / 2`. The circuit in
steps 1 and 2 prepares an equal superposition over the two states corresponding to the
eigenvalues :math:`e^{\pm 2 \pi i \theta}`.
#. The circuit returned by this function is applied so that :math:`\pm\theta` can be
estimated by finding the probabilities of the :math:`n` estimation wires. This in turn
allows for the estimation of :math:`\mu`.
Visit `Rebentrost et al. (2018)
<https://arxiv.org/abs/1805.00109>`__ for further details.
In this algorithm, the number of applications :math:`N` of the :math:`\mathcal{Q}` unitary
scales as :math:`2^{n}`. However, due to the use of quantum phase estimation, the error
:math:`\epsilon` scales as :math:`\mathcal{O}(2^{-n})`. Hence,
.. math::
N = \mathcal{O}\left(\frac{1}{\epsilon}\right).
This scaling can be compared to standard Monte Carlo estimation, where :math:`N` samples are
generated from the probability distribution and the average over :math:`f` is taken. In that
case,
.. math::
N = \mathcal{O}\left(\frac{1}{\epsilon^{2}}\right).
Hence, the quantum Monte Carlo algorithm has a quadratically improved time complexity with
:math:`N`.
**Example**
Consider a standard normal distribution :math:`p(x)` and a function
:math:`f(x) = \sin ^{2} (x)`. The expectation value of :math:`f(x)` is
:math:`\int_{-\infty}^{\infty}f(x)p(x) \approx 0.432332`. This number can be approximated by
discretizing the problem and using the quantum Monte Carlo algorithm.
First, the problem is discretized:
.. code-block:: python
from scipy.stats import norm
m = 5
M = 2 ** m
xmax = np.pi # bound to region [-pi, pi]
xs = np.linspace(-xmax, xmax, M)
probs = np.array([norm().pdf(x) for x in xs])
probs /= np.sum(probs)
func = lambda i: np.sin(xs[i]) ** 2
r_rotations = np.array([2 * np.arcsin(np.sqrt(func(i))) for i in range(M)])
The ``quantum_monte_carlo`` transform can then be used:
.. code-block::
from pennylane.templates.state_preparations.mottonen import (
_uniform_rotation_dagger as r_unitary,
)
n = 6
N = 2 ** n
a_wires = range(m)
wires = range(m + 1)
target_wire = m
estimation_wires = range(m + 1, n + m + 1)
dev = qml.device("default.qubit", wires=(n + m + 1))
def fn():
qml.templates.MottonenStatePreparation(np.sqrt(probs), wires=a_wires)
r_unitary(qml.RY, r_rotations, control_wires=a_wires[::-1], target_wire=target_wire)
@qml.qnode(dev)
def qmc():
qml.quantum_monte_carlo(fn, wires, target_wire, estimation_wires)()
return qml.probs(estimation_wires)
phase_estimated = np.argmax(qmc()[:int(N / 2)]) / N
The estimated value can be retrieved using the formula :math:`\mu = (1-\cos(\pi \theta))/2`
>>> (1 - np.cos(np.pi * phase_estimated)) / 2
0.42663476277231915
It is also possible to explore the resources required to perform the quantum Monte Carlo
algorithm
>>> qtape = qmc.qtape.expand(depth=1)
>>> qtape.get_resources()
{'RY': 14674,
'CNOT': 15686,
'PhaseShift': 1020,
'RX': 510,
'CZ': 126,
'PauliX': 1260,
'Toffoli': 2016,
'SWAP': 3,
'Hadamard': 6,
'ControlledPhaseShift': 15}
"""
wires = Wires(wires)
target_wire = Wires(target_wire)
estimation_wires = Wires(estimation_wires)
if Wires.shared_wires([wires, estimation_wires]):
raise ValueError("No wires can be shared between the wires and estimation_wires registers")
@wraps(fn)
def wrapper(*args, **kwargs):
fn(*args, **kwargs)
for i, control_wire in enumerate(estimation_wires):
Hadamard(control_wire)
# Find wires eligible to be used as helper wires
work_wires = estimation_wires.toset() - {control_wire}
n_reps = 2 ** (len(estimation_wires) - (i + 1))
q = apply_controlled_Q(
fn,
wires=wires,
target_wire=target_wire,
control_wire=control_wire,
work_wires=work_wires,
)
for _ in range(n_reps):
q(*args, **kwargs)
QFT(wires=estimation_wires).inv()
return wrapper
|
45,737 |
def _linda_forecast(
precip,
precip_lagr_diff,
timesteps,
fct_gen,
precip_pert_gen,
vel_pert_gen,
n_ensemble_members,
seed,
measure_time,
print_info,
return_output,
callback,
):
"""Compute LINDA nowcast."""
# compute convolved difference fields
precip_lagr_diff = precip_lagr_diff.copy()
for i in range(precip_lagr_diff.shape[0]):
for _ in range(fct_gen["ari_order"] - i):
precip_lagr_diff[i] = _composite_convolution(
precip_lagr_diff[i],
fct_gen["kernels_1"],
fct_gen["interp_weights"],
)
# initialize the random generators
if precip_pert_gen is not None:
rs_precip_pert = []
np.random.seed(seed)
for i in range(n_ensemble_members):
rs = np.random.RandomState(seed)
rs_precip_pert.append(rs)
seed = rs.randint(0, high=1e9)
else:
rs_precip_pert = None
if vel_pert_gen is not None:
vps = []
np.random.seed(seed)
for i in range(n_ensemble_members):
rs = np.random.RandomState(seed)
vp = vel_pert_gen["init_func"](seed)
vps.append(
lambda t, vp=vp: vel_pert_gen["gen_func"](
vp, t * vel_pert_gen["timestep"]
)
)
seed = rs.randint(0, high=1e9)
else:
vps = None
state = {
"precip_fct": [precip[-1].copy() for i in range(n_ensemble_members)],
"precip_lagr_diff": [
precip_lagr_diff.copy() for i in range(n_ensemble_members)
],
"rs_precip_pert": rs_precip_pert,
}
params = {
"interp_weights": fct_gen["interp_weights"],
"kernels_1": fct_gen["kernels_1"],
"kernels_2": fct_gen["kernels_2"],
"mask_adv": fct_gen["mask_adv"],
"num_ens_members": n_ensemble_members,
"num_workers": fct_gen["num_workers"],
"num_ensemble_workers": min(n_ensemble_members, fct_gen["num_workers"]),
"precip_pert_gen": precip_pert_gen,
"psi": fct_gen["psi"],
}
precip_f = nowcast_main_loop(
precip[-1],
fct_gen["velocity"],
state,
timesteps,
fct_gen["extrap_method"],
_update,
extrap_kwargs=fct_gen["extrap_kwargs"],
vel_pert_gen=vps,
params=params,
callback=callback,
return_output=return_output,
num_workers=fct_gen["num_workers"],
measure_time=measure_time,
)
if measure_time:
precip_f, mainloop_time = precip_f
if return_output:
if not fct_gen["add_perturbations"]:
precip_f = precip_f[0]
if measure_time:
return precip_f, mainloop_time
else:
return precip_f
else:
return None
|
def _linda_forecast(
precip,
precip_lagr_diff,
timesteps,
fct_gen,
precip_pert_gen,
vel_pert_gen,
n_ensemble_members,
seed,
measure_time,
print_info,
return_output,
callback,
):
"""Compute LINDA nowcast."""
# compute convolved difference fields
precip_lagr_diff = precip_lagr_diff.copy()
for i in range(precip_lagr_diff.shape[0]):
for _ in range(fct_gen["ari_order"] - i):
precip_lagr_diff[i] = _composite_convolution(
precip_lagr_diff[i],
fct_gen["kernels_1"],
fct_gen["interp_weights"],
)
# initialize the random generators
if precip_pert_gen is not None:
rs_precip_pert = []
np.random.seed(seed)
for i in range(n_ensemble_members):
rs = np.random.RandomState(seed)
rs_precip_pert.append(rs)
seed = rs.randint(0, high=1e9)
else:
rs_precip_pert = None
if velocity_pert_gen is not None:
vps = []
np.random.seed(seed)
for i in range(n_ensemble_members):
rs = np.random.RandomState(seed)
vp = vel_pert_gen["init_func"](seed)
vps.append(
lambda t, vp=vp: vel_pert_gen["gen_func"](
vp, t * vel_pert_gen["timestep"]
)
)
seed = rs.randint(0, high=1e9)
else:
vps = None
state = {
"precip_fct": [precip[-1].copy() for i in range(n_ensemble_members)],
"precip_lagr_diff": [
precip_lagr_diff.copy() for i in range(n_ensemble_members)
],
"rs_precip_pert": rs_precip_pert,
}
params = {
"interp_weights": fct_gen["interp_weights"],
"kernels_1": fct_gen["kernels_1"],
"kernels_2": fct_gen["kernels_2"],
"mask_adv": fct_gen["mask_adv"],
"num_ens_members": n_ensemble_members,
"num_workers": fct_gen["num_workers"],
"num_ensemble_workers": min(n_ensemble_members, fct_gen["num_workers"]),
"precip_pert_gen": precip_pert_gen,
"psi": fct_gen["psi"],
}
precip_f = nowcast_main_loop(
precip[-1],
fct_gen["velocity"],
state,
timesteps,
fct_gen["extrap_method"],
_update,
extrap_kwargs=fct_gen["extrap_kwargs"],
vel_pert_gen=vps,
params=params,
callback=callback,
return_output=return_output,
num_workers=fct_gen["num_workers"],
measure_time=measure_time,
)
if measure_time:
precip_f, mainloop_time = precip_f
if return_output:
if not fct_gen["add_perturbations"]:
precip_f = precip_f[0]
if measure_time:
return precip_f, mainloop_time
else:
return precip_f
else:
return None
|
31,424 |
def fetch_incidents(client, headers):
incidentquery = demisto.params().get("queryParameter")
incidentrepo = demisto.params().get("queryRepository")
timestampfrom = demisto.params().get("queryStartTime")
lastrun = demisto.getLastRun()
url = "/api/v1/repositories/" + incidentrepo + "/query"
headers["Accept"] = "application/json"
# set maximum of 50 returned events (this is idempotent)
incidentquery = incidentquery + "| head(50)"
backup_ts = int(datetime.now().timestamp()) * 1000
last_run_time = lastrun.get("time")
data = {
"queryString": incidentquery,
"end": "now",
"isLive": False,
"timeZoneOffsetMinutes": int(
demisto.params().get("queryTimeZoneOffsetMinutes")
),
}
if last_run_time is None:
# First run
data["start"] = timestampfrom
max_ts = 0
else:
data["start"] = int(last_run_time)
max_ts = int(last_run_time)
response = client.http_request("POST", url, data, headers)
if response.status_code == 200:
response_data = response.json()
for result in response_data:
ts = int(result.get("@timestamp", backup_ts))
if ts > max_ts:
max_ts = ts
# Ensures that max_ts gets a reasonable value if no events were returned on first run
if(not response_data):
max_ts = backup_ts
else:
max_ts += 1
demisto.setLastRun({"time": max_ts})
return form_incindents(response_data)
else:
raise ValueError(
"Error in fetching incidents. Error from server was: " + str(response.text)
)
|
def fetch_incidents(client, headers):
incidentquery = demisto.params().get("queryParameter")
incidentrepo = demisto.params().get("queryRepository")
timestampfrom = demisto.params().get("queryStartTime")
lastrun = demisto.getLastRun()
url = "/api/v1/repositories/" + incidentrepo + "/query"
headers["Accept"] = "application/json"
# set maximum of 50 returned events (this is idempotent)
incidentquery = incidentquery + "| head(50)"
backup_ts = int(datetime.now().timestamp()) * 1000
last_run_time = lastrun.get("time")
data = {
"queryString": incidentquery,
"end": "now",
"isLive": False,
"timeZoneOffsetMinutes": int(
demisto.params().get("queryTimeZoneOffsetMinutes")
),
}
if last_run_time is None:
# First run
data["start"] = timestampfrom
max_ts = 0
else:
data["start"] = int(last_run_time)
max_ts = int(last_run_time)
response = client.http_request("POST", url, data, headers)
if response.status_code == 200:
response_data = response.json()
for result in response_data:
ts = int(result.get("@timestamp", backup_ts))
if ts > max_ts:
max_ts = ts
# Ensures that max_ts gets a reasonable value if no events were returned on first run
if not response_data:
max_ts = backup_ts
else:
max_ts += 1
demisto.setLastRun({"time": max_ts})
return form_incindents(response_data)
else:
raise ValueError(
"Error in fetching incidents. Error from server was: " + str(response.text)
)
|
5,300 |
def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
# Warnings regarding unsupported FB FT modes
loss_names = {0 : "Negative_Sampling_Loss", 1 : "Hierarchical_Softmax_Loss", 2 : "Softmax_Loss", 3 : "OneVsAll_Loss"}
model_name = {1 : "Continuous_Bag_Of_Words", 2 : "Skip_Gram", 3 : "Supervised"}
if m.loss == 3 or m.loss == 4:
warnings.warn(f'Provided an un-supported Facebook FastText loss mode (i.e. un-supported loss: {loss_names[m.loss]}), \n it may lead to inconsistent gensim model likely to fail later. \n Currently Supported loss modes are {loss_names[0]}, {loss_names[1]}')
if m.model == 3:
warnings.warn(f'Provided an un-supported Facebook FastText model mode (i.e. un-supported loss: {model_name[m.model]}), \n it may lead to inconsistent gensim model likely to fail later. \n Currently Supported model modes are {model_name[1]}, {model_name[2]}')
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
|
def _load_fasttext_format(model_file, encoding='utf-8', full_model=True):
"""Load the input-hidden weight matrix from Facebook's native fasttext `.bin` output files.
Parameters
----------
model_file : str
Full path to the FastText model file.
encoding : str, optional
Specifies the file encoding.
full_model : boolean, optional
If False, skips loading the hidden output matrix. This saves a fair bit
of CPU time and RAM, but prevents training continuation.
Returns
-------
:class: `~gensim.models.fasttext.FastText`
The loaded model.
"""
with utils.open(model_file, 'rb') as fin:
m = gensim.models._fasttext_bin.load(fin, encoding=encoding, full_model=full_model)
# Warnings regarding unsupported FB FT modes
loss_names = {0 : "Negative_Sampling_Loss", 1 : "Hierarchical_Softmax_Loss", 2 : "Softmax_Loss", 3 : "OneVsAll_Loss"}
model_name = {1 : "Continuous_Bag_Of_Words", 2 : "Skip_Gram", 3 : "Supervised"}
if m.loss == 3 or m.loss == 4:
logger.warning(f"{model_file } contains an unsupported Facebook FastText loss mode '{loss_names[m.loss]}'. Loading it may lead to errors later on. Supported loss modes are '{loss_names[0]}', '{loss_names[1]}'.")
if m.model == 3:
warnings.warn(f'Provided an un-supported Facebook FastText model mode (i.e. un-supported loss: {model_name[m.model]}), \n it may lead to inconsistent gensim model likely to fail later. \n Currently Supported model modes are {model_name[1]}, {model_name[2]}')
model = FastText(
vector_size=m.dim,
window=m.ws,
epochs=m.epoch,
negative=m.neg,
hs=int(m.loss == 1),
sg=int(m.model == 2),
bucket=m.bucket,
min_count=m.min_count,
sample=m.t,
min_n=m.minn,
max_n=m.maxn,
)
model.corpus_total_words = m.ntokens
model.raw_vocab = m.raw_vocab
model.nwords = m.nwords
model.vocab_size = m.vocab_size
#
# This is here to fix https://github.com/RaRe-Technologies/gensim/pull/2373.
#
# We explicitly set min_count=1 regardless of the model's parameters to
# ignore the trim rule when building the vocabulary. We do this in order
# to support loading native models that were trained with pretrained vectors.
# Such models will contain vectors for _all_ encountered words, not only
# those occurring more frequently than min_count.
#
# Native models trained _without_ pretrained vectors already contain the
# trimmed raw_vocab, so this change does not affect them.
#
model.prepare_vocab(update=True, min_count=1)
model.num_original_vectors = m.vectors_ngrams.shape[0]
model.wv.init_post_load(m.vectors_ngrams)
model._init_post_load(m.hidden_output)
_check_model(model)
model.add_lifecycle_event(
"load_fasttext_format",
msg=f"loaded {m.vectors_ngrams.shape} weight matrix for fastText model from {fin.name}",
)
return model
|
10,224 |
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold',
'GreaterThanOrEqualToThreshold', '<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes',
'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count',
'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second',
'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second',
'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list', default=[]),
insufficient_data_actions=dict(type='list', default=[]),
ok_actions=dict(type='list', default=[]),
treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleAWSModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
try:
connection = boto3_conn(module, conn_type='client', resource='cloudwatch',
region=region, endpoint=ec2_url, **aws_connect_params)
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
|
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold',
'GreaterThanOrEqualToThreshold', '<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes',
'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count',
'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second',
'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second',
'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list', default=[]),
insufficient_data_actions=dict(type='list', default=[]),
ok_actions=dict(type='list', default=[]),
treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleAWSModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
state = module.params.get('state')
connection = module.client('cloudwatch')
if region:
try:
connection = boto3_conn(module, conn_type='client', resource='cloudwatch',
region=region, endpoint=ec2_url, **aws_connect_params)
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
|
36,942 |
def add_parser(subparsers, parent_parser):
EXPERIMENTS_HELP = "Commands to run and compare experiments."
experiments_parser = subparsers.add_parser(
"experiments",
parents=[parent_parser],
aliases=["exp"],
description=append_doc_link(EXPERIMENTS_HELP, "exp"),
formatter_class=argparse.RawDescriptionHelpFormatter,
help=EXPERIMENTS_HELP,
)
experiments_subparsers = experiments_parser.add_subparsers(
dest="cmd",
help="Use `dvc experiments CMD --help` to display "
"command-specific help.",
)
fix_subparsers(experiments_subparsers)
EXPERIMENTS_SHOW_HELP = "Print experiments."
experiments_show_parser = experiments_subparsers.add_parser(
"show",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_SHOW_HELP, "exp/show"),
help=EXPERIMENTS_SHOW_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_show_parser.add_argument(
"-a",
"--all-branches",
action="store_true",
default=False,
help="Show experiments derived from the tip of all Git branches.",
)
experiments_show_parser.add_argument(
"-T",
"--all-tags",
action="store_true",
default=False,
help="Show experiments derived from all Git tags.",
)
experiments_show_parser.add_argument(
"-A",
"--all-commits",
action="store_true",
default=False,
help="Show experiments derived from all Git commits.",
)
experiments_show_parser.add_argument(
"-n",
"--num",
type=int,
default=1,
dest="num",
metavar="<num>",
help="Show the last `num` commits from HEAD.",
)
experiments_show_parser.add_argument(
"--no-pager",
action="store_true",
default=False,
help="Do not pipe output into a pager.",
)
experiments_show_parser.add_argument(
"--include-metrics",
action="append",
default=[],
help="Include the specified metrics in output table.",
metavar="<metrics_list>",
)
experiments_show_parser.add_argument(
"--exclude-metrics",
action="append",
default=[],
help="Exclude the specified metrics from output table.",
metavar="<metrics_list>",
)
experiments_show_parser.add_argument(
"--include-params",
action="append",
default=[],
help="Include the specified params in output table.",
metavar="<params_list>",
)
experiments_show_parser.add_argument(
"--exclude-params",
action="append",
default=[],
help="Exclude the specified params from output table.",
metavar="<params_list>",
)
experiments_show_parser.add_argument(
"--param-deps",
action="store_true",
default=False,
help="Show only params that are stage dependencies.",
)
experiments_show_parser.add_argument(
"--sort-by",
help="Sort related experiments by the specified metric or param.",
metavar="<metric/param>",
)
experiments_show_parser.add_argument(
"--sort-order",
help="Sort order to use with --sort-by.",
choices=("asc", "desc"),
default="asc",
)
experiments_show_parser.add_argument(
"--no-timestamp",
action="store_true",
default=False,
help="Do not show experiment timestamps.",
)
experiments_show_parser.add_argument(
"--sha",
action="store_true",
default=False,
help="Always show git commit SHAs instead of branch/tag names.",
)
experiments_show_parser.add_argument(
"--show-json",
action="store_true",
default=False,
help="Print output in JSON format instead of a human-readable table.",
)
experiments_show_parser.add_argument(
"--show-csv",
action="store_true",
default=False,
help="Print output in csv format instead of a human-readable table.",
)
experiments_show_parser.add_argument(
"--precision",
type=int,
help=(
"Round metrics/params to `n` digits precision after the decimal "
f"point. Rounds to {DEFAULT_PRECISION} digits by default."
),
metavar="<n>",
)
experiments_show_parser.set_defaults(func=CmdExperimentsShow)
EXPERIMENTS_APPLY_HELP = (
"Apply the changes from an experiment to your workspace."
)
experiments_apply_parser = experiments_subparsers.add_parser(
"apply",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_APPLY_HELP, "exp/apply"),
help=EXPERIMENTS_APPLY_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_apply_parser.add_argument(
"--no-force",
action="store_false",
dest="force",
help="Fail if this command would overwrite conflicting changes.",
)
experiments_apply_parser.add_argument(
"experiment", help="Experiment to be applied."
).complete = completion.EXPERIMENT
experiments_apply_parser.set_defaults(func=CmdExperimentsApply)
EXPERIMENTS_DIFF_HELP = (
"Show changes between experiments in the DVC repository."
)
experiments_diff_parser = experiments_subparsers.add_parser(
"diff",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_DIFF_HELP, "exp/diff"),
help=EXPERIMENTS_DIFF_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_diff_parser.add_argument(
"a_rev", nargs="?", help="Old experiment to compare (defaults to HEAD)"
).complete = completion.EXPERIMENT
experiments_diff_parser.add_argument(
"b_rev",
nargs="?",
help="New experiment to compare (defaults to the current workspace)",
).complete = completion.EXPERIMENT
experiments_diff_parser.add_argument(
"--all",
action="store_true",
default=False,
help="Show unchanged metrics/params as well.",
)
experiments_diff_parser.add_argument(
"--param-deps",
action="store_true",
default=False,
help="Show only params that are stage dependencies.",
)
experiments_diff_parser.add_argument(
"--show-json",
action="store_true",
default=False,
help="Show output in JSON format.",
)
experiments_diff_parser.add_argument(
"--show-md",
action="store_true",
default=False,
help="Show tabulated output in the Markdown format (GFM).",
)
experiments_diff_parser.add_argument(
"--old",
action="store_true",
default=False,
help="Show old metric/param value.",
)
experiments_diff_parser.add_argument(
"--no-path",
action="store_true",
default=False,
help="Don't show metric/param path.",
)
experiments_diff_parser.add_argument(
"--precision",
type=int,
help=(
"Round metrics/params to `n` digits precision after the decimal "
f"point. Rounds to {DEFAULT_PRECISION} digits by default."
),
metavar="<n>",
)
experiments_diff_parser.set_defaults(func=CmdExperimentsDiff)
EXPERIMENTS_RUN_HELP = "Run or resume an experiment."
experiments_run_parser = experiments_subparsers.add_parser(
"run",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_RUN_HELP, "exp/run"),
help=EXPERIMENTS_RUN_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
_add_run_common(experiments_run_parser)
experiments_run_parser.add_argument(
"-r",
"--rev",
type=str,
dest="checkpoint_resume",
help=(
"Continue the specified checkpoint experiment. Can only be used "
"in conjunction with --queue or --temp."
),
metavar="<experiment_rev>",
).complete = completion.EXPERIMENT
experiments_run_parser.add_argument(
"--reset",
action="store_true",
help="Reset existing checkpoints and restart the experiment.",
)
experiments_run_parser.set_defaults(func=CmdExperimentsRun)
EXPERIMENTS_GC_HELP = "Garbage collect unneeded experiments."
EXPERIMENTS_GC_DESCRIPTION = (
"Removes all experiments which are not derived from the specified"
"Git revisions."
)
experiments_gc_parser = experiments_subparsers.add_parser(
"gc",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_GC_DESCRIPTION, "exp/gc"),
help=EXPERIMENTS_GC_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_gc_parser.add_argument(
"-w",
"--workspace",
action="store_true",
default=False,
help="Keep experiments derived from the current workspace.",
)
experiments_gc_parser.add_argument(
"-a",
"--all-branches",
action="store_true",
default=False,
help="Keep experiments derived from the tips of all Git branches.",
)
experiments_gc_parser.add_argument(
"-T",
"--all-tags",
action="store_true",
default=False,
help="Keep experiments derived from all Git tags.",
)
experiments_gc_parser.add_argument(
"-A",
"--all-commits",
action="store_true",
default=False,
help="Keep experiments derived from all Git commits.",
)
experiments_gc_parser.add_argument(
"--queued",
action="store_true",
default=False,
help=(
"Keep queued experiments (experiments run queue will be cleared "
"by default)."
),
)
experiments_gc_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Force garbage collection - automatically agree to all prompts.",
)
experiments_gc_parser.set_defaults(func=CmdExperimentsGC)
EXPERIMENTS_BRANCH_HELP = "Promote an experiment to a Git branch."
experiments_branch_parser = experiments_subparsers.add_parser(
"branch",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_BRANCH_HELP, "exp/branch"),
help=EXPERIMENTS_BRANCH_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_branch_parser.add_argument(
"experiment", help="Experiment to be promoted."
)
experiments_branch_parser.add_argument(
"branch", help="Git branch name to use."
)
experiments_branch_parser.set_defaults(func=CmdExperimentsBranch)
EXPERIMENTS_LIST_HELP = "List local and remote experiments."
experiments_list_parser = experiments_subparsers.add_parser(
"list",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_LIST_HELP, "exp/list"),
help=EXPERIMENTS_LIST_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_list_parser.add_argument(
"--rev",
type=str,
default=None,
help=(
"List experiments derived from the specified revision. "
"Defaults to HEAD if neither `--rev` nor `--all` are specified."
),
metavar="<rev>",
)
experiments_list_parser.add_argument(
"--all", action="store_true", help="List all experiments."
)
experiments_list_parser.add_argument(
"--names-only",
action="store_true",
help="Only output experiment names (without parent commits).",
)
experiments_list_parser.add_argument(
"git_remote",
nargs="?",
default=None,
help=(
"Optional Git remote name or Git URL. "
"If provided, experiments from the specified Git repository "
" will be listed instead of local ones."
),
metavar="[<git_remote>]",
)
experiments_list_parser.set_defaults(func=CmdExperimentsList)
EXPERIMENTS_PUSH_HELP = "Push a local experiment to a Git remote."
experiments_push_parser = experiments_subparsers.add_parser(
"push",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_PUSH_HELP, "exp/push"),
help=EXPERIMENTS_PUSH_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_push_parser.add_argument(
"-f",
"--force",
action="store_true",
help="Replace experiment in the Git remote if it already exists.",
)
experiments_push_parser.add_argument(
"--no-cache",
action="store_false",
dest="push_cache",
help=(
"Do not push cached outputs for this experiment to DVC remote "
"storage."
),
)
experiments_push_parser.add_argument(
"-r",
"--remote",
dest="dvc_remote",
metavar="<name>",
help="Name of the DVC remote to use when pushing cached outputs.",
)
experiments_push_parser.add_argument(
"-j",
"--jobs",
type=int,
metavar="<number>",
help=(
"Number of jobs to run simultaneously when pushing to DVC remote "
"storage."
),
)
experiments_push_parser.add_argument(
"--run-cache",
action="store_true",
default=False,
help="Push run history for all stages.",
)
experiments_push_parser.add_argument(
"git_remote",
help="Git remote name or Git URL.",
metavar="<git_remote>",
)
experiments_push_parser.add_argument(
"experiment", help="Experiment to push.", metavar="<experiment>"
).complete = completion.EXPERIMENT
experiments_push_parser.set_defaults(func=CmdExperimentsPush)
EXPERIMENTS_PULL_HELP = "Pull an experiment from a Git remote."
experiments_pull_parser = experiments_subparsers.add_parser(
"pull",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_PULL_HELP, "exp/pull"),
help=EXPERIMENTS_PULL_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_pull_parser.add_argument(
"-f",
"--force",
action="store_true",
help="Replace local experiment already exists.",
)
experiments_pull_parser.add_argument(
"--no-cache",
action="store_false",
dest="pull_cache",
help=(
"Do not pull cached outputs for this experiment from DVC remote "
"storage."
),
)
experiments_pull_parser.add_argument(
"-r",
"--remote",
dest="dvc_remote",
metavar="<name>",
help="Name of the DVC remote to use when pulling cached outputs.",
)
experiments_pull_parser.add_argument(
"-j",
"--jobs",
type=int,
metavar="<number>",
help=(
"Number of jobs to run simultaneously when pulling from DVC "
"remote storage."
),
)
experiments_pull_parser.add_argument(
"--run-cache",
action="store_true",
default=False,
help="Pull run history for all stages.",
)
experiments_pull_parser.add_argument(
"git_remote",
help="Git remote name or Git URL.",
metavar="<git_remote>",
)
experiments_pull_parser.add_argument(
"experiment", help="Experiment to pull.", metavar="<experiment>"
)
experiments_pull_parser.set_defaults(func=CmdExperimentsPull)
EXPERIMENTS_REMOVE_HELP = "Remove experiments."
experiments_remove_parser = experiments_subparsers.add_parser(
"remove",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_REMOVE_HELP, "exp/remove"),
help=EXPERIMENTS_REMOVE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
remove_group = experiments_remove_parser.add_mutually_exclusive_group()
remove_group.add_argument(
"--queue", action="store_true", help="Remove all queued experiments."
)
remove_group.add_argument(
"-A",
"--all",
action="store_true",
help="Remove all committed experiments.",
)
remove_group.add_argument(
"-g",
"--git-remote",
metavar="<git_remote>",
help="Name or URL of the Git remote to remove the experiment from",
)
experiments_remove_parser.add_argument(
"experiment",
nargs="*",
help="Experiments to remove.",
metavar="<experiment>",
)
experiments_remove_parser.set_defaults(func=CmdExperimentsRemove)
EXPERIMENTS_INIT_HELP = "Create experiments."
experiments_init_parser = experiments_subparsers.add_parser(
"init",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_INIT_HELP, "exp/init"),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_init_parser.add_argument(
"cmd",
nargs=argparse.REMAINDER,
help="Command to execute.",
metavar="command",
)
experiments_init_parser.add_argument(
"--run",
action="store_true",
help="Run the experiment after initializing it",
)
experiments_init_parser.add_argument(
"--interactive",
"-i",
action="store_true",
help="Prompt for values that are not provided",
)
experiments_init_parser.add_argument(
"--template", help="Stage template to use to fill with provided values"
)
experiments_init_parser.add_argument(
"--explicit", help="Only use the path values explicitly provided"
)
experiments_init_parser.add_argument(
"--name", "-n", help="Name of the stage to create"
)
experiments_init_parser.add_argument(
"--code",
help="Path to the source file or directory "
"which your experiment depends",
)
experiments_init_parser.add_argument(
"--data",
help="Path to the data file or directory "
"which your experiment depends",
)
experiments_init_parser.add_argument(
"--models",
help="Path to the model file or directory for your experiments",
)
experiments_init_parser.add_argument(
"--params", help="Path to the parameters file for your experiments"
)
experiments_init_parser.add_argument(
"--metrics", help="Path to the metrics file for your experiments"
)
experiments_init_parser.add_argument(
"--plots",
help="Path to the plots file or directory for your experiments",
)
experiments_init_parser.add_argument(
"--live", help="Path to log dvclive outputs for your experiments"
)
experiments_init_parser.set_defaults(func=CmdExperimentsInit)
|
def add_parser(subparsers, parent_parser):
EXPERIMENTS_HELP = "Commands to run and compare experiments."
experiments_parser = subparsers.add_parser(
"experiments",
parents=[parent_parser],
aliases=["exp"],
description=append_doc_link(EXPERIMENTS_HELP, "exp"),
formatter_class=argparse.RawDescriptionHelpFormatter,
help=EXPERIMENTS_HELP,
)
experiments_subparsers = experiments_parser.add_subparsers(
dest="cmd",
help="Use `dvc experiments CMD --help` to display "
"command-specific help.",
)
fix_subparsers(experiments_subparsers)
EXPERIMENTS_SHOW_HELP = "Print experiments."
experiments_show_parser = experiments_subparsers.add_parser(
"show",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_SHOW_HELP, "exp/show"),
help=EXPERIMENTS_SHOW_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_show_parser.add_argument(
"-a",
"--all-branches",
action="store_true",
default=False,
help="Show experiments derived from the tip of all Git branches.",
)
experiments_show_parser.add_argument(
"-T",
"--all-tags",
action="store_true",
default=False,
help="Show experiments derived from all Git tags.",
)
experiments_show_parser.add_argument(
"-A",
"--all-commits",
action="store_true",
default=False,
help="Show experiments derived from all Git commits.",
)
experiments_show_parser.add_argument(
"-n",
"--num",
type=int,
default=1,
dest="num",
metavar="<num>",
help="Show the last `num` commits from HEAD.",
)
experiments_show_parser.add_argument(
"--no-pager",
action="store_true",
default=False,
help="Do not pipe output into a pager.",
)
experiments_show_parser.add_argument(
"--include-metrics",
action="append",
default=[],
help="Include the specified metrics in output table.",
metavar="<metrics_list>",
)
experiments_show_parser.add_argument(
"--exclude-metrics",
action="append",
default=[],
help="Exclude the specified metrics from output table.",
metavar="<metrics_list>",
)
experiments_show_parser.add_argument(
"--include-params",
action="append",
default=[],
help="Include the specified params in output table.",
metavar="<params_list>",
)
experiments_show_parser.add_argument(
"--exclude-params",
action="append",
default=[],
help="Exclude the specified params from output table.",
metavar="<params_list>",
)
experiments_show_parser.add_argument(
"--param-deps",
action="store_true",
default=False,
help="Show only params that are stage dependencies.",
)
experiments_show_parser.add_argument(
"--sort-by",
help="Sort related experiments by the specified metric or param.",
metavar="<metric/param>",
)
experiments_show_parser.add_argument(
"--sort-order",
help="Sort order to use with --sort-by.",
choices=("asc", "desc"),
default="asc",
)
experiments_show_parser.add_argument(
"--no-timestamp",
action="store_true",
default=False,
help="Do not show experiment timestamps.",
)
experiments_show_parser.add_argument(
"--sha",
action="store_true",
default=False,
help="Always show git commit SHAs instead of branch/tag names.",
)
experiments_show_parser.add_argument(
"--show-json",
action="store_true",
default=False,
help="Print output in JSON format instead of a human-readable table.",
)
experiments_show_parser.add_argument(
"--show-csv",
action="store_true",
default=False,
help="Print output in csv format instead of a human-readable table.",
)
experiments_show_parser.add_argument(
"--precision",
type=int,
help=(
"Round metrics/params to `n` digits precision after the decimal "
f"point. Rounds to {DEFAULT_PRECISION} digits by default."
),
metavar="<n>",
)
experiments_show_parser.set_defaults(func=CmdExperimentsShow)
EXPERIMENTS_APPLY_HELP = (
"Apply the changes from an experiment to your workspace."
)
experiments_apply_parser = experiments_subparsers.add_parser(
"apply",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_APPLY_HELP, "exp/apply"),
help=EXPERIMENTS_APPLY_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_apply_parser.add_argument(
"--no-force",
action="store_false",
dest="force",
help="Fail if this command would overwrite conflicting changes.",
)
experiments_apply_parser.add_argument(
"experiment", help="Experiment to be applied."
).complete = completion.EXPERIMENT
experiments_apply_parser.set_defaults(func=CmdExperimentsApply)
EXPERIMENTS_DIFF_HELP = (
"Show changes between experiments in the DVC repository."
)
experiments_diff_parser = experiments_subparsers.add_parser(
"diff",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_DIFF_HELP, "exp/diff"),
help=EXPERIMENTS_DIFF_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_diff_parser.add_argument(
"a_rev", nargs="?", help="Old experiment to compare (defaults to HEAD)"
).complete = completion.EXPERIMENT
experiments_diff_parser.add_argument(
"b_rev",
nargs="?",
help="New experiment to compare (defaults to the current workspace)",
).complete = completion.EXPERIMENT
experiments_diff_parser.add_argument(
"--all",
action="store_true",
default=False,
help="Show unchanged metrics/params as well.",
)
experiments_diff_parser.add_argument(
"--param-deps",
action="store_true",
default=False,
help="Show only params that are stage dependencies.",
)
experiments_diff_parser.add_argument(
"--show-json",
action="store_true",
default=False,
help="Show output in JSON format.",
)
experiments_diff_parser.add_argument(
"--show-md",
action="store_true",
default=False,
help="Show tabulated output in the Markdown format (GFM).",
)
experiments_diff_parser.add_argument(
"--old",
action="store_true",
default=False,
help="Show old metric/param value.",
)
experiments_diff_parser.add_argument(
"--no-path",
action="store_true",
default=False,
help="Don't show metric/param path.",
)
experiments_diff_parser.add_argument(
"--precision",
type=int,
help=(
"Round metrics/params to `n` digits precision after the decimal "
f"point. Rounds to {DEFAULT_PRECISION} digits by default."
),
metavar="<n>",
)
experiments_diff_parser.set_defaults(func=CmdExperimentsDiff)
EXPERIMENTS_RUN_HELP = "Run or resume an experiment."
experiments_run_parser = experiments_subparsers.add_parser(
"run",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_RUN_HELP, "exp/run"),
help=EXPERIMENTS_RUN_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
_add_run_common(experiments_run_parser)
experiments_run_parser.add_argument(
"-r",
"--rev",
type=str,
dest="checkpoint_resume",
help=(
"Continue the specified checkpoint experiment. Can only be used "
"in conjunction with --queue or --temp."
),
metavar="<experiment_rev>",
).complete = completion.EXPERIMENT
experiments_run_parser.add_argument(
"--reset",
action="store_true",
help="Reset existing checkpoints and restart the experiment.",
)
experiments_run_parser.set_defaults(func=CmdExperimentsRun)
EXPERIMENTS_GC_HELP = "Garbage collect unneeded experiments."
EXPERIMENTS_GC_DESCRIPTION = (
"Removes all experiments which are not derived from the specified"
"Git revisions."
)
experiments_gc_parser = experiments_subparsers.add_parser(
"gc",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_GC_DESCRIPTION, "exp/gc"),
help=EXPERIMENTS_GC_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_gc_parser.add_argument(
"-w",
"--workspace",
action="store_true",
default=False,
help="Keep experiments derived from the current workspace.",
)
experiments_gc_parser.add_argument(
"-a",
"--all-branches",
action="store_true",
default=False,
help="Keep experiments derived from the tips of all Git branches.",
)
experiments_gc_parser.add_argument(
"-T",
"--all-tags",
action="store_true",
default=False,
help="Keep experiments derived from all Git tags.",
)
experiments_gc_parser.add_argument(
"-A",
"--all-commits",
action="store_true",
default=False,
help="Keep experiments derived from all Git commits.",
)
experiments_gc_parser.add_argument(
"--queued",
action="store_true",
default=False,
help=(
"Keep queued experiments (experiments run queue will be cleared "
"by default)."
),
)
experiments_gc_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Force garbage collection - automatically agree to all prompts.",
)
experiments_gc_parser.set_defaults(func=CmdExperimentsGC)
EXPERIMENTS_BRANCH_HELP = "Promote an experiment to a Git branch."
experiments_branch_parser = experiments_subparsers.add_parser(
"branch",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_BRANCH_HELP, "exp/branch"),
help=EXPERIMENTS_BRANCH_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_branch_parser.add_argument(
"experiment", help="Experiment to be promoted."
)
experiments_branch_parser.add_argument(
"branch", help="Git branch name to use."
)
experiments_branch_parser.set_defaults(func=CmdExperimentsBranch)
EXPERIMENTS_LIST_HELP = "List local and remote experiments."
experiments_list_parser = experiments_subparsers.add_parser(
"list",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_LIST_HELP, "exp/list"),
help=EXPERIMENTS_LIST_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_list_parser.add_argument(
"--rev",
type=str,
default=None,
help=(
"List experiments derived from the specified revision. "
"Defaults to HEAD if neither `--rev` nor `--all` are specified."
),
metavar="<rev>",
)
experiments_list_parser.add_argument(
"--all", action="store_true", help="List all experiments."
)
experiments_list_parser.add_argument(
"--names-only",
action="store_true",
help="Only output experiment names (without parent commits).",
)
experiments_list_parser.add_argument(
"git_remote",
nargs="?",
default=None,
help=(
"Optional Git remote name or Git URL. "
"If provided, experiments from the specified Git repository "
" will be listed instead of local ones."
),
metavar="[<git_remote>]",
)
experiments_list_parser.set_defaults(func=CmdExperimentsList)
EXPERIMENTS_PUSH_HELP = "Push a local experiment to a Git remote."
experiments_push_parser = experiments_subparsers.add_parser(
"push",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_PUSH_HELP, "exp/push"),
help=EXPERIMENTS_PUSH_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_push_parser.add_argument(
"-f",
"--force",
action="store_true",
help="Replace experiment in the Git remote if it already exists.",
)
experiments_push_parser.add_argument(
"--no-cache",
action="store_false",
dest="push_cache",
help=(
"Do not push cached outputs for this experiment to DVC remote "
"storage."
),
)
experiments_push_parser.add_argument(
"-r",
"--remote",
dest="dvc_remote",
metavar="<name>",
help="Name of the DVC remote to use when pushing cached outputs.",
)
experiments_push_parser.add_argument(
"-j",
"--jobs",
type=int,
metavar="<number>",
help=(
"Number of jobs to run simultaneously when pushing to DVC remote "
"storage."
),
)
experiments_push_parser.add_argument(
"--run-cache",
action="store_true",
default=False,
help="Push run history for all stages.",
)
experiments_push_parser.add_argument(
"git_remote",
help="Git remote name or Git URL.",
metavar="<git_remote>",
)
experiments_push_parser.add_argument(
"experiment", help="Experiment to push.", metavar="<experiment>"
).complete = completion.EXPERIMENT
experiments_push_parser.set_defaults(func=CmdExperimentsPush)
EXPERIMENTS_PULL_HELP = "Pull an experiment from a Git remote."
experiments_pull_parser = experiments_subparsers.add_parser(
"pull",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_PULL_HELP, "exp/pull"),
help=EXPERIMENTS_PULL_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_pull_parser.add_argument(
"-f",
"--force",
action="store_true",
help="Replace local experiment already exists.",
)
experiments_pull_parser.add_argument(
"--no-cache",
action="store_false",
dest="pull_cache",
help=(
"Do not pull cached outputs for this experiment from DVC remote "
"storage."
),
)
experiments_pull_parser.add_argument(
"-r",
"--remote",
dest="dvc_remote",
metavar="<name>",
help="Name of the DVC remote to use when pulling cached outputs.",
)
experiments_pull_parser.add_argument(
"-j",
"--jobs",
type=int,
metavar="<number>",
help=(
"Number of jobs to run simultaneously when pulling from DVC "
"remote storage."
),
)
experiments_pull_parser.add_argument(
"--run-cache",
action="store_true",
default=False,
help="Pull run history for all stages.",
)
experiments_pull_parser.add_argument(
"git_remote",
help="Git remote name or Git URL.",
metavar="<git_remote>",
)
experiments_pull_parser.add_argument(
"experiment", help="Experiment to pull.", metavar="<experiment>"
)
experiments_pull_parser.set_defaults(func=CmdExperimentsPull)
EXPERIMENTS_REMOVE_HELP = "Remove experiments."
experiments_remove_parser = experiments_subparsers.add_parser(
"remove",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_REMOVE_HELP, "exp/remove"),
help=EXPERIMENTS_REMOVE_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
remove_group = experiments_remove_parser.add_mutually_exclusive_group()
remove_group.add_argument(
"--queue", action="store_true", help="Remove all queued experiments."
)
remove_group.add_argument(
"-A",
"--all",
action="store_true",
help="Remove all committed experiments.",
)
remove_group.add_argument(
"-g",
"--git-remote",
metavar="<git_remote>",
help="Name or URL of the Git remote to remove the experiment from",
)
experiments_remove_parser.add_argument(
"experiment",
nargs="*",
help="Experiments to remove.",
metavar="<experiment>",
)
experiments_remove_parser.set_defaults(func=CmdExperimentsRemove)
EXPERIMENTS_INIT_HELP = "Create experiments."
experiments_init_parser = experiments_subparsers.add_parser(
"init",
parents=[parent_parser],
description=append_doc_link(EXPERIMENTS_INIT_HELP, "exp/init"),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
experiments_init_parser.add_argument(
"cmd",
nargs=argparse.REMAINDER,
help="Command to execute.",
metavar="command",
)
experiments_init_parser.add_argument(
"--run",
action="store_true",
help="Run the experiment after initializing it",
)
experiments_init_parser.add_argument(
"--interactive",
"-i",
action="store_true",
help="Prompt for values that are not provided",
)
experiments_init_parser.add_argument(
"--template", help="Stage template to use to fill with provided values"
)
experiments_init_parser.add_argument(
"--explicit", help="Only use the path values explicitly provided"
)
experiments_init_parser.add_argument(
"--name", "-n", help="Name of the stage to create"
)
experiments_init_parser.add_argument(
"--code",
help="Path to the source file or directory "
"which your experiments depend",
)
experiments_init_parser.add_argument(
"--data",
help="Path to the data file or directory "
"which your experiment depends",
)
experiments_init_parser.add_argument(
"--models",
help="Path to the model file or directory for your experiments",
)
experiments_init_parser.add_argument(
"--params", help="Path to the parameters file for your experiments"
)
experiments_init_parser.add_argument(
"--metrics", help="Path to the metrics file for your experiments"
)
experiments_init_parser.add_argument(
"--plots",
help="Path to the plots file or directory for your experiments",
)
experiments_init_parser.add_argument(
"--live", help="Path to log dvclive outputs for your experiments"
)
experiments_init_parser.set_defaults(func=CmdExperimentsInit)
|
21,834 |
def gen_3pids(count: int):
"""Generate `count` threepids as a list."""
return [
{"medium": "email", "address": "user%[email protected]" % i} for i in range(count)
]
|
def gen_3pids(count: int) -> Dict[str, Any]:
"""Generate `count` threepids as a list."""
return [
{"medium": "email", "address": "user%[email protected]" % i} for i in range(count)
]
|
57,038 |
def validate_aggregated_stats(aggregated_stats):
"""Validates the attribute stats dict.
Args:
aggregated_stats: dict. Data that needs to be validated.
"""
exploration_stats_properties = [
'num_starts',
'num_actual_starts',
'num_completions'
]
state_stats_properties = [
'total_answers_count',
'useful_feedback_count',
'total_hit_count',
'first_hit_count',
'num_times_solution_viewed',
'num_completions'
]
try:
for exp_stats_property in exploration_stats_properties:
if exp_stats_property not in aggregated_stats:
raise base.BaseHandler.InvalidInputException(
'%s not in aggregated stats dict.' % (exp_stats_property))
for state_name in aggregated_stats['state_stats_mapping']:
for state_stats_property in state_stats_properties:
if state_stats_property not in aggregated_stats[
'state_stats_mapping'][state_name]:
raise base.BaseHandler.InvalidInputException(
'%s not in state stats mapping of %s in aggregated '
'stats dict.' % (state_stats_property, state_name))
except base.BaseHandler.InvalidInputException as e:
logging.exception(e)
|
def validate_aggregated_stats(aggregated_stats):
"""Validates the attribute stats dict.
Args:
aggregated_stats: dict. Data that needs to be validated.
"""
exploration_stats_properties = [
'num_starts',
'num_actual_starts',
'num_completions'
]
state_stats_properties = [
'total_answers_count',
'useful_feedback_count',
'total_hit_count',
'first_hit_count',
'num_times_solution_viewed',
'num_completions'
]
try:
for exp_stats_property in exploration_stats_properties:
if exp_stats_property not in aggregated_stats:
raise base.BaseHandler.InvalidInputException(
'%s not in aggregated stats dict.' % (exp_stats_property))
state_stats_mapping = aggregated_stats['state_stats_mapping']
for state_name in state_stats_mapping:
for state_stats_property in state_stats_properties:
if state_stats_property not in state_stats_mapping[state_name]:
raise base.BaseHandler.InvalidInputException(
'%s not in state stats mapping of %s in aggregated '
'stats dict.' % (state_stats_property, state_name))
except base.BaseHandler.InvalidInputException as e:
logging.exception(e)
|
32,081 |
def main():
file_type = ''
entry_id = demisto.args()['entryid']
max_depth = int(demisto.args().get('max_depth', '3'))
nesting_level_to_parse = demisto.args().get('nesting_level_to_parse', 'All files')
# we use the MAX_DEPTH_CONST to calculate the depth of the email
# each level will reduce the max_depth by 1
# not the best way to do it
global MAX_DEPTH_CONST
MAX_DEPTH_CONST = max_depth
if max_depth < 1:
return_error('Minimum max_depth is 1, the script will parse just the top email')
parse_only_headers = demisto.args().get('parse_only_headers', 'false').lower() == 'true'
try:
result = demisto.executeCommand('getFilePath', {'id': entry_id})
if is_error(result):
return_error(get_error(result))
file_path = result[0]['Contents']['path']
file_name = result[0]['Contents']['name']
result = demisto.executeCommand('getEntry', {'id': entry_id})
if is_error(result):
return_error(get_error(result))
file_metadata = result[0]['FileMetadata']
file_type = file_metadata.get('info', '') or file_metadata.get('type', '')
if 'MIME entity text, ISO-8859 text' in file_type:
file_type = 'application/pkcs7-mime'
except Exception as ex:
return_error(
"Failed to load file entry with entry id: {}. Error: {}".format(
entry_id, str(ex) + "\n\nTrace:\n" + traceback.format_exc()))
try:
file_type_lower = file_type.lower()
if 'composite document file v2 document' in file_type_lower \
or 'cdfv2 microsoft outlook message' in file_type_lower:
email_data, attached_emails = handle_msg(file_path, file_name, parse_only_headers, max_depth)
output = create_email_output(email_data, attached_emails)
elif any(eml_candidate in file_type_lower for eml_candidate in
['rfc 822 mail', 'smtp mail', 'multipart/signed', 'multipart/alternative', 'multipart/mixed',
'message/rfc822',
'application/pkcs7-mime', 'multipart/related']):
if 'unicode (with bom) text' in file_type_lower:
email_data, attached_emails = handle_eml(
file_path, False, file_name, parse_only_headers, max_depth, bom=True
)
else:
email_data, attached_emails = handle_eml(file_path, False, file_name, parse_only_headers, max_depth)
output = create_email_output(email_data, attached_emails)
elif ('ascii text' in file_type_lower or 'unicode text' in file_type_lower
or ('data' == file_type_lower.strip() and file_name and file_name.lower().strip().endswith('.eml'))):
try:
# Try to open the email as-is
with open(file_path, 'rb') as f:
file_contents = f.read()
if file_contents and 'Content-Type:'.lower() in file_contents.lower():
email_data, attached_emails = handle_eml(file_path, b64=False, file_name=file_name,
parse_only_headers=parse_only_headers, max_depth=max_depth)
output = create_email_output(email_data, attached_emails)
else:
# Try a base64 decode
b64decode(file_contents)
if file_contents and 'Content-Type:'.lower() in file_contents.lower():
email_data, attached_emails = handle_eml(file_path, b64=True, file_name=file_name,
parse_only_headers=parse_only_headers,
max_depth=max_depth)
output = create_email_output(email_data, attached_emails)
else:
try:
# Try to open
email_data, attached_emails = handle_eml(file_path, b64=False, file_name=file_name,
parse_only_headers=parse_only_headers,
max_depth=max_depth)
is_data_populated = is_email_data_populated(email_data)
if not is_data_populated:
raise DemistoException("No email_data found")
output = create_email_output(email_data, attached_emails)
except Exception as e:
demisto.debug("ParseEmailFiles failed with {}".format(str(e)))
return_error("Could not extract email from file. Possible reasons for this error are:\n"
"- Base64 decode did not include rfc 822 strings.\n"
"- Email contained no Content-Type and no data.")
except Exception as e:
return_error("Exception while trying to decode email from within base64: {}\n\nTrace:\n{}"
.format(str(e), traceback.format_exc()))
else:
return_error("Unknown file format: [{}] for file: [{}]".format(file_type, file_name))
output = recursive_convert_to_unicode(output)
email = output # output may be a single email
if isinstance(output, list) and len(output) > 0:
email, output = parse_nesting_level(nesting_level_to_parse, output)
return_outputs(
readable_output=data_to_md(email, file_name, print_only_headers=parse_only_headers),
outputs={
'Email': output
},
raw_response=output
)
except Exception as ex:
demisto.error(str(ex) + "\n\nTrace:\n" + traceback.format_exc())
return_error(str(ex) + "\n\nTrace:\n" + traceback.format_exc())
|
def main():
file_type = ''
entry_id = demisto.args()['entryid']
max_depth = int(demisto.args().get('max_depth', '3'))
nesting_level_to_parse = demisto.args().get('nesting_level_to_return', 'All files')
# we use the MAX_DEPTH_CONST to calculate the depth of the email
# each level will reduce the max_depth by 1
# not the best way to do it
global MAX_DEPTH_CONST
MAX_DEPTH_CONST = max_depth
if max_depth < 1:
return_error('Minimum max_depth is 1, the script will parse just the top email')
parse_only_headers = demisto.args().get('parse_only_headers', 'false').lower() == 'true'
try:
result = demisto.executeCommand('getFilePath', {'id': entry_id})
if is_error(result):
return_error(get_error(result))
file_path = result[0]['Contents']['path']
file_name = result[0]['Contents']['name']
result = demisto.executeCommand('getEntry', {'id': entry_id})
if is_error(result):
return_error(get_error(result))
file_metadata = result[0]['FileMetadata']
file_type = file_metadata.get('info', '') or file_metadata.get('type', '')
if 'MIME entity text, ISO-8859 text' in file_type:
file_type = 'application/pkcs7-mime'
except Exception as ex:
return_error(
"Failed to load file entry with entry id: {}. Error: {}".format(
entry_id, str(ex) + "\n\nTrace:\n" + traceback.format_exc()))
try:
file_type_lower = file_type.lower()
if 'composite document file v2 document' in file_type_lower \
or 'cdfv2 microsoft outlook message' in file_type_lower:
email_data, attached_emails = handle_msg(file_path, file_name, parse_only_headers, max_depth)
output = create_email_output(email_data, attached_emails)
elif any(eml_candidate in file_type_lower for eml_candidate in
['rfc 822 mail', 'smtp mail', 'multipart/signed', 'multipart/alternative', 'multipart/mixed',
'message/rfc822',
'application/pkcs7-mime', 'multipart/related']):
if 'unicode (with bom) text' in file_type_lower:
email_data, attached_emails = handle_eml(
file_path, False, file_name, parse_only_headers, max_depth, bom=True
)
else:
email_data, attached_emails = handle_eml(file_path, False, file_name, parse_only_headers, max_depth)
output = create_email_output(email_data, attached_emails)
elif ('ascii text' in file_type_lower or 'unicode text' in file_type_lower
or ('data' == file_type_lower.strip() and file_name and file_name.lower().strip().endswith('.eml'))):
try:
# Try to open the email as-is
with open(file_path, 'rb') as f:
file_contents = f.read()
if file_contents and 'Content-Type:'.lower() in file_contents.lower():
email_data, attached_emails = handle_eml(file_path, b64=False, file_name=file_name,
parse_only_headers=parse_only_headers, max_depth=max_depth)
output = create_email_output(email_data, attached_emails)
else:
# Try a base64 decode
b64decode(file_contents)
if file_contents and 'Content-Type:'.lower() in file_contents.lower():
email_data, attached_emails = handle_eml(file_path, b64=True, file_name=file_name,
parse_only_headers=parse_only_headers,
max_depth=max_depth)
output = create_email_output(email_data, attached_emails)
else:
try:
# Try to open
email_data, attached_emails = handle_eml(file_path, b64=False, file_name=file_name,
parse_only_headers=parse_only_headers,
max_depth=max_depth)
is_data_populated = is_email_data_populated(email_data)
if not is_data_populated:
raise DemistoException("No email_data found")
output = create_email_output(email_data, attached_emails)
except Exception as e:
demisto.debug("ParseEmailFiles failed with {}".format(str(e)))
return_error("Could not extract email from file. Possible reasons for this error are:\n"
"- Base64 decode did not include rfc 822 strings.\n"
"- Email contained no Content-Type and no data.")
except Exception as e:
return_error("Exception while trying to decode email from within base64: {}\n\nTrace:\n{}"
.format(str(e), traceback.format_exc()))
else:
return_error("Unknown file format: [{}] for file: [{}]".format(file_type, file_name))
output = recursive_convert_to_unicode(output)
email = output # output may be a single email
if isinstance(output, list) and len(output) > 0:
email, output = parse_nesting_level(nesting_level_to_parse, output)
return_outputs(
readable_output=data_to_md(email, file_name, print_only_headers=parse_only_headers),
outputs={
'Email': output
},
raw_response=output
)
except Exception as ex:
demisto.error(str(ex) + "\n\nTrace:\n" + traceback.format_exc())
return_error(str(ex) + "\n\nTrace:\n" + traceback.format_exc())
|
52,026 |
def write_about_json(m):
with open(join(m.config.info_dir, 'about.json'), 'w') as fo:
d = {}
for key, default in FIELDS["about"].items():
value = m.get_value('about/%s' % key)
if value:
d[key] = value
if default is list:
d[key] = utils.ensure_list(value)
# for sake of reproducibility, record some conda info
d['conda_version'] = conda_version
d['conda_build_version'] = conda_build_version
# conda env will be in most, but not necessarily all installations.
# Don't die if we don't see it.
stripped_channels = []
for channel in get_rc_urls() + list(m.config.channel_urls):
stripped_channels.append(sanitize_channel(channel))
d['channels'] = stripped_channels
evars = ['CIO_TEST']
d['env_vars'] = {ev: os.getenv(ev, '<not set>') for ev in evars}
# this information will only be present in conda 4.2.10+
try:
d['conda_private'] = conda_private
except (KeyError, AttributeError):
pass
extra = m.get_section('extra')
if m.config.extra_info:
try:
git_info = get_git_info(m)
extra.update(git_info)
except Exception as e:
print(f"Unable to get git info. Skipping adding extra_info to about.json: {e}")
pass
env = environ.Environment(root_dir)
d['root_pkgs'] = env.package_specs()
# Include the extra section of the metadata in the about.json
d['extra'] = extra
json.dump(d, fo, indent=2, sort_keys=True)
|
def write_about_json(m):
with open(join(m.config.info_dir, 'about.json'), 'w') as fo:
d = {}
for key, default in FIELDS["about"].items():
value = m.get_value('about/%s' % key)
if value:
d[key] = value
if default is list:
d[key] = utils.ensure_list(value)
# for sake of reproducibility, record some conda info
d['conda_version'] = conda_version
d['conda_build_version'] = conda_build_version
# conda env will be in most, but not necessarily all installations.
# Don't die if we don't see it.
stripped_channels = []
for channel in get_rc_urls() + list(m.config.channel_urls):
stripped_channels.append(sanitize_channel(channel))
d['channels'] = stripped_channels
evars = ['CIO_TEST']
d['env_vars'] = {ev: os.getenv(ev, '<not set>') for ev in evars}
# this information will only be present in conda 4.2.10+
try:
d['conda_private'] = conda_private
except (KeyError, AttributeError):
pass
extra = m.get_section('extra')
if m.config.extra_info:
try:
git_info = get_git_info(m)
extra.update(git_info)
except Exception as exc:
print(f"Unable to get git info. Skipping adding extra_info to about.json: {exc}")
pass
env = environ.Environment(root_dir)
d['root_pkgs'] = env.package_specs()
# Include the extra section of the metadata in the about.json
d['extra'] = extra
json.dump(d, fo, indent=2, sort_keys=True)
|
13,577 |
def test_log_levels():
logger = NumpyMatrixOperator._logger
before_name = 'INFO'
logger.setLevel(before_name)
before = logger.level
with temporary_log_levels({logger.name: 'DEBUG'}):
assert 'DEBUG' == logging.getLevelName(logger.level)
assert logger.level != before
assert logger.level == before
assert before_name == logging.getLevelName(logger.level)
|
def test_log_levels():
logger = NumpyMatrixOperator._logger
before_name = 'INFO'
logger.setLevel(before_name)
before = logger.level
with log_levels({logger.name: 'DEBUG'}):
assert 'DEBUG' == logging.getLevelName(logger.level)
assert logger.level != before
assert logger.level == before
assert before_name == logging.getLevelName(logger.level)
|
14,884 |
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Openhome platform."""
if not discovery_info:
return True
if DATA_OPENHOME not in hass.data:
hass.data[DATA_OPENHOME] = []
name = discovery_info.get("name")
description = discovery_info.get("ssdp_description")
_LOGGER.info("Openhome device found: %s", name)
device = Device(description)
# if device has already been discovered
if device.Uuid() in [x.unique_id for x in hass.data[DATA_OPENHOME]]:
return True
hass_device = OpenhomeDevice(hass, device)
add_entities([hass_device], True)
hass.data[DATA_OPENHOME].append(hass_device)
def service_handle(call):
"""Handle the service call."""
entity_id = call.data.get(ATTR_ENTITY_ID)
device = next(
[
device
for device in hass.data[DATA_OPENHOME]
if device.entity_id == entity_id
].__iter__(),
None,
)
if not device:
return
if call.service == SERVICE_INVOKE_PIN:
index = call.data.get(ATTR_PIN_INDEX)
_LOGGER.info("Openhome invoking pin %s on %s", index, entity_id)
if index:
device.invoke_pin(index)
hass.services.register(
DOMAIN, SERVICE_INVOKE_PIN, service_handle, schema=OPENHOME_PIN_SCHEMA
)
return True
|
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Openhome platform."""
if not discovery_info:
return True
if DATA_OPENHOME not in hass.data:
hass.data[DATA_OPENHOME] = []
name = discovery_info.get("name")
description = discovery_info.get("ssdp_description")
_LOGGER.info("Openhome device found: %s", name)
device = Device(description)
# if device has already been discovered
if device.Uuid() in [x.unique_id for x in hass.data[DATA_OPENHOME]]:
return True
hass_device = OpenhomeDevice(hass, device)
add_entities([hass_device], True)
hass.data[DATA_OPENHOME].append(entity)
def service_handle(call):
"""Handle the service call."""
entity_id = call.data.get(ATTR_ENTITY_ID)
device = next(
[
device
for device in hass.data[DATA_OPENHOME]
if device.entity_id == entity_id
].__iter__(),
None,
)
if not device:
return
if call.service == SERVICE_INVOKE_PIN:
index = call.data.get(ATTR_PIN_INDEX)
_LOGGER.info("Openhome invoking pin %s on %s", index, entity_id)
if index:
device.invoke_pin(index)
hass.services.register(
DOMAIN, SERVICE_INVOKE_PIN, service_handle, schema=OPENHOME_PIN_SCHEMA
)
return True
|
31,512 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials').get('identifier')
password = params.get('credentials').get('password')
proxy = params.get('proxy', False)
verify_certificate = not params.get('insecure', False)
url = str(params.get("url"))
if url[-1] == "/":
base_url = url + "api/v2/"
else:
base_url = url + "/api/v2/"
indicator_collections = params.get('indicator_collections', [])
indicators_first_fetch = params.get('indicators_first_fetch', '3 days').strip()
requests_count = int(params.get('requests_count', 2))
args = demisto.args()
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
headers={"Accept": "*/*"})
commands = {'gibtia-get-indicators': get_indicators_command}
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif command == 'fetch-indicators':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, indicators = fetch_indicators_command(client=client, last_run=demisto.getIntegrationContext(),
first_fetch_time=indicators_first_fetch,
indicator_collections=indicator_collections,
requests_count=requests_count)
demisto.setIntegrationContext(next_run)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
else:
return_results(commands[command](client, args))
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials').get('identifier')
password = params.get('credentials').get('password')
proxy = params.get('proxy', False)
verify_certificate = not params.get('insecure', False)
url = str(params.get("url"))
if url[-1] == "/":
base_url = url + "api/v2/"
else:
base_url = url + "/api/v2/"
indicator_collections = params.get('indicator_collections', [])
indicators_first_fetch = params.get('indicators_first_fetch', '3 days').strip()
requests_count = int(params.get('requests_count', 2))
args = demisto.args()
command = demisto.command()
LOG(f'Command being called is {command}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy,
headers={"Accept": "*/*"})
commands = {'gibtia-get-indicators': get_indicators_command}
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif command == 'fetch-indicators':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, indicators = fetch_indicators_command(client=client, last_run=get_integration_context(),
first_fetch_time=indicators_first_fetch,
indicator_collections=indicator_collections,
requests_count=requests_count)
demisto.setIntegrationContext(next_run)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
else:
return_results(commands[command](client, args))
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
12,296 |
def hadamard_transform(N=1):
"""Quantum object representing the N-qubit Hadamard gate.
Returns
-------
q : qobj
Quantum object representation of the N-qubit Hadamard gate.
"""
data = [[1, 1], [1, -1]]
H = Qobj(data)/np.sqrt(2)
return tensor([H]*N)
|
def hadamard_transform(N=1):
"""Quantum object representing the N-qubit Hadamard gate.
Returns
-------
q : qobj
Quantum object representation of the N-qubit Hadamard gate.
"""
H = Qobj([[1, 1], [1, -1]]) / np.sqrt(2)
return tensor([H]*N)
|
1,892 |
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, default=1
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}, default='arpack'
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, default=1e-6
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : int, default=100
Maximum number of iterations for 'arpack' method.
Not used if eigen_solver=='dense'
random_state : int, RandomState instance, default=None
Determines the random number generator when ``solver`` == 'arpack'.
Pass an int for reproducible results across multiple function calls.
See :term: `Glossary <random_state>`.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
|
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, default=1
Number of low eigenvalues to skip.
eigen_solver : {'auto', 'arpack', 'dense'}, default='arpack'
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, default=1e-6
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : int, default=100
Maximum number of iterations for 'arpack' method.
Not used if eigen_solver=='dense'
random_state : int, RandomState instance, default=None
Determines the random number generator when ``solver`` == 'arpack'.
Pass an int for reproducible results across multiple function calls.
See :term: `Glossary <random_state>`.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
|
43,832 |
def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
Note that with backpropagation, this functions should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
In with the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
By specifying ``reset_on_enter=False``, you can reuse the same tracker accross
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
|
def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
``"default"`` and ``"timing"``.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
Note that with backpropagation, this functions should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
In with the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
By specifying ``reset_on_enter=False``, you can reuse the same tracker accross
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
|
31,993 |
def list_incidents_command(client: Client, args: Dict[str, Any]) -> CommandResults:
limit = args.get('limit')
until = create_time(args.get('until'))
since = create_time(args.get('since'))
page_size = args.get('page_size', '50')
page_number = args.get('page_number')
items = []
if not limit:
response = client.list_incidents_request(until, since, page_size, page_number)
items = response.get('items', [])
else:
limit = int(limit)
page_size = page_size if limit > 100 else limit
total = 0
while total < limit:
response = client.list_incidents_request(until, since, page_size, page_number)
items += response.get('items', [])
if not response.get('hasNext'):
break
total += len(response.get('items'))
items = remove_duplicates_in_items(items, 'id')
context_data = prepare_paging_context_data(response, items, 'Incidents')
page_number = response.get('pageNumber')
output = prepare_incidents_readable_items(items)
total_pages = response.get('totalPages')
text = f'Total Retrieved Incidents : {len(output)}\n Page number {page_number} out of {total_pages} '
humanReadable = tableToMarkdown(text, output, ['Id', 'Title', 'Summary', 'Priority', 'RiskScore', 'Status',
'AlertCount', 'Created', 'LastUpdated', 'Assignee', 'Sources',
'Categories'])
command_results = CommandResults(
outputs=context_data,
readable_output=humanReadable,
raw_response=response
)
return command_results
|
def list_incidents_command(client: Client, args: Dict[str, Any]) -> CommandResults:
limit = arg_to_number(args.get('limit'))
until = create_time(args.get('until'))
since = create_time(args.get('since'))
page_size = args.get('page_size', '50')
page_number = args.get('page_number')
items = []
if not limit:
response = client.list_incidents_request(until, since, page_size, page_number)
items = response.get('items', [])
else:
limit = int(limit)
page_size = page_size if limit > 100 else limit
total = 0
while total < limit:
response = client.list_incidents_request(until, since, page_size, page_number)
items += response.get('items', [])
if not response.get('hasNext'):
break
total += len(response.get('items'))
items = remove_duplicates_in_items(items, 'id')
context_data = prepare_paging_context_data(response, items, 'Incidents')
page_number = response.get('pageNumber')
output = prepare_incidents_readable_items(items)
total_pages = response.get('totalPages')
text = f'Total Retrieved Incidents : {len(output)}\n Page number {page_number} out of {total_pages} '
humanReadable = tableToMarkdown(text, output, ['Id', 'Title', 'Summary', 'Priority', 'RiskScore', 'Status',
'AlertCount', 'Created', 'LastUpdated', 'Assignee', 'Sources',
'Categories'])
command_results = CommandResults(
outputs=context_data,
readable_output=humanReadable,
raw_response=response
)
return command_results
|
59,127 |
def get_entry_point(group: str, name: str) -> EntryPoint:
"""
Return an entry point with a given name within a specific group
:param group: the entry point group
:param name: the name of the entry point
:return: the entry point if it exists else None
:raises aiida.common.MissingEntryPointError: entry point was not registered
"""
# The next line should be removed for ``aiida-core==3.0`` when the old deprecated entry points are fully removed.
name = convert_potentially_deprecated_entry_point(group, name)
found = eps().select(group=group, name=name)
if name not in found.names:
raise MissingEntryPointError(f"Entry point '{name}' not found in group '{group}'")
if len(found) > 1:
# if the entry points have the same *value*, it does not matter which one we load
# (needed e.g. to allow installing aiida-core in user space over an existing system-level installation)
if len(set(ep.value for ep in found)) != 1:
raise MultipleEntryPointError(f"Multiple entry points '{name}' found in group '{group}': {found}")
return found[name]
|
def get_entry_point(group: str, name: str) -> EntryPoint:
"""
Return an entry point with a given name within a specific group
:param group: the entry point group
:param name: the name of the entry point
:return: the entry point if it exists else None
:raises aiida.common.MissingEntryPointError: entry point was not registered
"""
# The next line should be removed for ``aiida-core==3.0`` when the old deprecated entry points are fully removed.
name = convert_potentially_deprecated_entry_point(group, name)
found = eps().select(group=group, name=name)
if name not in found.names:
raise MissingEntryPointError(f"Entry point '{name}' not found in group '{group}'")
# If multiple entry points are found and they have different values we raise, otherwise if they all
# correspond to the same value, we simply return one of them
if len(found) > 1 and len(set(ep.value for ep in found)) != 1:
raise MultipleEntryPointError(f"Multiple entry points '{name}' found in group '{group}': {found}")
return found[name]
|
34,161 |
def test_core(
model: Optional[Text] = None,
stories: Optional[Text] = None,
endpoints: Optional[Text] = None,
output: Text = DEFAULT_RESULTS_PATH,
kwargs: Optional[Dict] = None,
):
import rasa.core.test
import rasa.core.utils as core_utils
from rasa.nlu import utils as nlu_utils
from rasa.model import get_model
from rasa.core.interpreter import NaturalLanguageInterpreter
from rasa.core.agent import Agent
_endpoints = core_utils.AvailableEndpoints.read_endpoints(endpoints)
if kwargs is None:
kwargs = {}
if output:
nlu_utils.create_dir(output)
unpacked_model = get_model(model)
if unpacked_model is None:
print_error(
"Not able to test: Could not find any model. Use 'rasa train' to train a "
"Rasa model."
)
return
core_path, nlu_path = get_model_subdirectories(unpacked_model)
if not os.path.exists(core_path):
print_error(
"Not able to test: Could not find any Core model. Use 'rasa train' to "
"train a model."
)
use_e2e = kwargs["e2e"] if "e2e" in kwargs else False
_interpreter = RegexInterpreter()
if use_e2e:
if os.path.exists(nlu_path):
_interpreter = NaturalLanguageInterpreter.create(nlu_path, _endpoints.nlu)
else:
print_warning(
"No NLU model found. Use default 'RegexInterpreter' for end-to-end "
"evaluation."
)
_agent = Agent.load(unpacked_model, interpreter=_interpreter)
kwargs = minimal_kwargs(kwargs, rasa.core.test, ["stories", "agent"])
loop = asyncio.get_event_loop()
loop.run_until_complete(
rasa.core.test(stories, _agent, out_directory=output, **kwargs)
)
|
def test_core(
model: Optional[Text] = None,
stories: Optional[Text] = None,
endpoints: Optional[Text] = None,
output: Text = DEFAULT_RESULTS_PATH,
kwargs: Optional[Dict] = None,
):
import rasa.core.test
import rasa.core.utils as core_utils
from rasa.nlu import utils as nlu_utils
from rasa.model import get_model
from rasa.core.interpreter import NaturalLanguageInterpreter
from rasa.core.agent import Agent
_endpoints = core_utils.AvailableEndpoints.read_endpoints(endpoints)
if kwargs is None:
kwargs = {}
if output:
nlu_utils.create_dir(output)
unpacked_model = get_model(model)
if unpacked_model is None:
print_error(
"Not able to test: Could not find any model. Use 'rasa train' to train a "
"Rasa model."
)
return
core_path, nlu_path = get_model_subdirectories(unpacked_model)
if not os.path.exists(core_path):
print_error(
"Not able to test: Could not find any Core model. Use 'rasa train' to "
"train a model."
)
use_e2e = kwargs["e2e"] if "e2e" in kwargs else False
_interpreter = RegexInterpreter()
if use_e2e:
if os.path.exists(nlu_path):
_interpreter = NaturalLanguageInterpreter.create(nlu_path, _endpoints.nlu)
else:
print_warning(
"No NLU model found. Using default 'RegexInterpreter' for end-to-end "
"evaluation."
)
_agent = Agent.load(unpacked_model, interpreter=_interpreter)
kwargs = minimal_kwargs(kwargs, rasa.core.test, ["stories", "agent"])
loop = asyncio.get_event_loop()
loop.run_until_complete(
rasa.core.test(stories, _agent, out_directory=output, **kwargs)
)
|
24,795 |
def _is_part_of_with_items(node: nodes.Call) -> bool:
"""
Checks if one of the node's parents is an ``nodes.With`` node and that the node itself is located
somewhere under its ``items``.
"""
frame = node.frame()
current = node
while current != frame:
if isinstance(current, nodes.With):
items_start = current.items[0][0].lineno
items_end = current.items[-1][0].tolineno
return items_start <= node.lineno <= items_end
current = current.parent
return False
|
def _is_part_of_with_items(node: nodes.Call) -> bool:
"""
Checks if one of the node's parents is a ``nodes.With`` node and that the node itself is located
somewhere under its ``items``.
"""
frame = node.frame()
current = node
while current != frame:
if isinstance(current, nodes.With):
items_start = current.items[0][0].lineno
items_end = current.items[-1][0].tolineno
return items_start <= node.lineno <= items_end
current = current.parent
return False
|
46,953 |
def get_git_info():
try:
repo = git.Repo(search_parent_directories=True)
repo_infos = {
"repo_id": str(repo),
"repo_sha": str(repo.head.object.hexsha),
"repo_branch": str(repo.active_branch),
"hostname": str(socket.gethostname()),
}
return repo_infos
except (TypeError):
return {
"repo_id": None,
"repo_sha": None,
"repo_branch": None,
"hostname": None,
}
|
def get_git_info():
try:
repo = git.Repo(search_parent_directories=True)
repo_infos = {
"repo_id": str(repo),
"repo_sha": str(repo.head.object.hexsha),
"repo_branch": str(repo.active_branch),
"hostname": str(socket.gethostname()),
}
return repo_infos
except TypeError:
return {
"repo_id": None,
"repo_sha": None,
"repo_branch": None,
"hostname": None,
}
|
1,569 |
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
if issparse(y):
raise TypeError(
"multilabel-indicator of type Sparse are not supported."
)
else:
raise ValueError("Unknown label type: %r" % y_type)
|
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
if issparse(y):
raise ValueError(
"multilabel-indicator of type Sparse are not supported."
)
else:
raise ValueError("Unknown label type: %r" % y_type)
|
54,530 |
def test_dominates() -> None:
def create_trial(values: List[float], state: TrialState = TrialState.COMPLETE) -> FrozenTrial:
return optuna.trial.create_trial(values=values, state=state)
directions = [StudyDirection.MINIMIZE, StudyDirection.MAXIMIZE]
def check_domination(t0: FrozenTrial, t1: FrozenTrial) -> None:
assert _dominates(t0, t1, directions)
assert not _dominates(t1, t0, directions)
def check_nondomination(t0: FrozenTrial, t1: FrozenTrial) -> None:
assert not _dominates(t0, t1, directions)
assert not _dominates(t1, t0, directions)
# The numbers of objectives for `t0` and `t1` don't match.
with pytest.raises(ValueError):
t0 = create_trial([1]) # One objective.
t1 = create_trial([1, 2]) # Two objectives.
_dominates(t0, t1, directions)
# The numbers of objectives and directions don't match.
with pytest.raises(ValueError):
t0 = create_trial([1]) # One objective.
t1 = create_trial([1]) # One objective.
_dominates(t0, t1, directions)
# `t0` dominates `t1`.
t0 = create_trial([0, 2])
t1 = create_trial([1, 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, 1])
t1 = create_trial([1, 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, 2])
t1 = create_trial([float("inf"), 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([float("inf"), 2])
t1 = create_trial([float("inf"), 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([-float("inf"), float("inf")])
t1 = create_trial([0, 1])
check_domination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([1, 1])
t1 = create_trial([1, 1])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([0, 1])
t1 = create_trial([1, 2])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([-float("inf"), 1])
t1 = create_trial([0, 2])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([float("inf"), float("inf")])
t1 = create_trial([float("inf"), float("inf")])
check_nondomination(t0, t1)
for t0_state in [TrialState.FAIL, TrialState.WAITING, TrialState.PRUNED]:
t0 = create_trial([1, 1], t0_state)
for t1_state in [
TrialState.COMPLETE,
TrialState.FAIL,
TrialState.WAITING,
TrialState.PRUNED,
]:
# If `t0` has not the COMPLETE state, it never dominates other trials.
t1 = create_trial([0, 2], t1_state)
if t1_state == TrialState.COMPLETE:
# If `t0` isn't COMPLETE and `t1` is COMPLETE, `t1` dominates `t0`.
check_domination(t1, t0)
else:
# If `t1` isn't COMPLETE, it doesn't dominate others.
check_nondomination(t0, t1)
|
def test_dominates() -> None:
def create_trial(values: List[float], state: TrialState = TrialState.COMPLETE) -> FrozenTrial:
return optuna.trial.create_trial(values=values, state=state)
directions = [StudyDirection.MINIMIZE, StudyDirection.MAXIMIZE]
def check_domination(t0: FrozenTrial, t1: FrozenTrial) -> None:
assert _dominates(t0, t1, directions)
assert not _dominates(t1, t0, directions)
def check_nondomination(t0: FrozenTrial, t1: FrozenTrial) -> None:
assert not _dominates(t0, t1, directions)
assert not _dominates(t1, t0, directions)
# The numbers of objectives for `t0` and `t1` don't match.
with pytest.raises(ValueError):
t0 = create_trial([1]) # One objective.
t1 = create_trial([1, 2]) # Two objectives.
_dominates(t0, t1, directions)
# The numbers of objectives and directions don't match.
with pytest.raises(ValueError):
t0 = create_trial([1]) # One objective.
t1 = create_trial([1]) # One objective.
_dominates(t0, t1, directions)
# `t0` dominates `t1`.
t0 = create_trial([0, 2])
t1 = create_trial([1, 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, 1])
t1 = create_trial([1, 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, 2])
t1 = create_trial([float("inf"), 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([float("inf"), 2])
t1 = create_trial([float("inf"), 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([-float("inf"), float("inf")])
t1 = create_trial([0, 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, float("inf")])
t1 = create_trial([0, 1])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, float("inf")])
t1 = create_trial([0, -float("inf")])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([-float("inf"), 0])
t1 = create_trial([float("inf"), 0])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([0, float("inf")])
t1 = create_trial([float("inf"), 0])
check_domination(t0, t1)
# `t0` dominates `t1`.
t0 = create_trial([-float("inf"), 0])
t1 = create_trial([0, -float("inf")])
check_domination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([1, 1])
t1 = create_trial([1, 1])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([0, 1])
t1 = create_trial([1, 2])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([-float("inf"), 1])
t1 = create_trial([0, 2])
check_nondomination(t0, t1)
# `t0` and `t1` don't dominate each other.
t0 = create_trial([float("inf"), float("inf")])
t1 = create_trial([float("inf"), float("inf")])
check_nondomination(t0, t1)
for t0_state in [TrialState.FAIL, TrialState.WAITING, TrialState.PRUNED]:
t0 = create_trial([1, 1], t0_state)
for t1_state in [
TrialState.COMPLETE,
TrialState.FAIL,
TrialState.WAITING,
TrialState.PRUNED,
]:
# If `t0` has not the COMPLETE state, it never dominates other trials.
t1 = create_trial([0, 2], t1_state)
if t1_state == TrialState.COMPLETE:
# If `t0` isn't COMPLETE and `t1` is COMPLETE, `t1` dominates `t0`.
check_domination(t1, t0)
else:
# If `t1` isn't COMPLETE, it doesn't dominate others.
check_nondomination(t0, t1)
|
31,781 |
def feed_main(params, feed_name, prefix):
handle_proxy()
client = Client(**params)
indicator_type = params.get('indicator_type')
auto_detect = params.get('auto_detect_type')
feedTags = argToList(params.get('feedTags'))
limit = int(demisto.args().get('limit', 10))
command = demisto.command()
if prefix and not prefix.endswith('-'):
prefix += '-'
if command != 'fetch-indicators':
demisto.info(f'Command being called is {demisto.command()}')
try:
if command == 'test-module':
return_results(test_module(client, limit))
elif command == 'fetch-indicators':
create_relationships = params.get('create_relationships')
indicators, no_update = fetch_indicators_command(client, indicator_type, feedTags, auto_detect,
create_relationships)
# get demisto version
demisto_version = get_demisto_version().get('version', '5.5.0')
# check if the version is higher than 6.5.0 so we can use noUpdate parameter
if LooseVersion(demisto_version) >= LooseVersion('6.5.0'):
if not len(indicators):
demisto.createIndicators(indicators, noUpdate=no_update)
else:
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b, noUpdate=no_update)
else:
# call createIndicators without noUpdate arg
if not len(indicators):
demisto.createIndicators(indicators)
else:
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif command == f'{prefix}get-indicators':
# dummy command for testing
create_relationships = params.get('create_relationships')
indicators, _ = fetch_indicators_command(client, indicator_type, feedTags, auto_detect, create_relationships, limit)
hr = tableToMarkdown('Indicators', indicators, headers=['value', 'type', 'rawJSON'])
return_results(CommandResults(readable_output=hr, raw_response=indicators))
except Exception as err:
err_msg = f'Error in {feed_name} integration [{err}]'
return_error(err_msg)
|
def feed_main(params, feed_name, prefix):
handle_proxy()
client = Client(**params)
indicator_type = params.get('indicator_type')
auto_detect = params.get('auto_detect_type')
feedTags = argToList(params.get('feedTags'))
limit = int(demisto.args().get('limit', 10))
command = demisto.command()
if prefix and not prefix.endswith('-'):
prefix += '-'
if command != 'fetch-indicators':
demisto.info(f'Command being called is {demisto.command()}')
try:
if command == 'test-module':
return_results(test_module(client, limit))
elif command == 'fetch-indicators':
create_relationships = params.get('create_relationships')
indicators, no_update = fetch_indicators_command(client, indicator_type, feedTags, auto_detect,
create_relationships)
# get demisto version
demisto_version = get_demisto_version().get('version', '5.5.0')
# check if the version is higher than 6.5.0 so we can use noUpdate parameter
if LooseVersion(demisto_version) >= LooseVersion('6.5.0'):
if not len(indicators):
demisto.createIndicators(indicators, noUpdate=no_update)
else:
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b, noUpdate=no_update)
else:
# call createIndicators without noUpdate arg
if not indicators:
demisto.createIndicators(indicators)
else:
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif command == f'{prefix}get-indicators':
# dummy command for testing
create_relationships = params.get('create_relationships')
indicators, _ = fetch_indicators_command(client, indicator_type, feedTags, auto_detect, create_relationships, limit)
hr = tableToMarkdown('Indicators', indicators, headers=['value', 'type', 'rawJSON'])
return_results(CommandResults(readable_output=hr, raw_response=indicators))
except Exception as err:
err_msg = f'Error in {feed_name} integration [{err}]'
return_error(err_msg)
|
46,966 |
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.block_size <= 0:
block_size = tokenizer.max_len
else:
block_size = min(data_args.block_size, tokenizer.max_len)
# Main function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
lm_datasets = tokenized_datasets.map(group_texts, batched=True, load_from_cache_file=not data_args.overwrite_cache)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=lm_datasets["train"] if training_args.do_train else None,
eval_dataset=lm_datasets["validation"] if training_args.do_eval else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model() # Saves the tokenizer too for easy upload
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_output = trainer.evaluate()
perplexity = math.exp(eval_output["eval_loss"])
results["perplexity"] = perplexity
output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in results.items():
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
return results
|
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.block_size <= 0:
block_size = tokenizer.max_len
else:
block_size = min(data_args.block_size, tokenizer.max_len)
# Main function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
lm_datasets = tokenized_datasets.map(group_texts, batched=True, load_from_cache_file=not data_args.overwrite_cache)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=lm_datasets["train"] if training_args.do_train else None,
eval_dataset=lm_datasets["validation"] if training_args.do_eval else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model() # Saves the tokenizer too for easy upload
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_output = trainer.evaluate()
perplexity = math.exp(eval_output["eval_loss"])
results["perplexity"] = perplexity
output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key, value in results.items():
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
return results
|
34,285 |
def test_train_status():
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
payload["force"] = True
ctx = multiprocessing.get_context("spawn")
# run a rasa server in one process
p0 = ctx.Process(target=subprocess.run, args=(["rasa", "run", "--enable-api"],))
p0.start()
server_ready = False
# wait until server is up before sending train request and status test loop
while not server_ready:
try:
server_ready = (
requests.get("http://localhost:5005/status").status_code == 200
)
except requests.exceptions.ConnectionError:
pass
time.sleep(1)
# use another process to hit the first server with a training request
results = Manager().dict()
p1 = ctx.Process(target=_send_train_request, args=(results, payload))
p1.start()
training_started = False
training_finished = False
# use our current process to query the status endpoint while the training is running
while not training_finished:
time.sleep(0.5)
# hit status endpoint with short timeout to ensure training doesn't block
status_resp = requests.get("http://localhost:5005/status", timeout=1)
assert status_resp.status_code == 200
if not training_started:
# make sure that we don't fail because we got status before training updated number of jobs
training_started = status_resp.json()["num_active_training_jobs"] == 1
else:
if results.get("train_response_code") is None:
assert status_resp.json()["num_active_training_jobs"] == 1
else:
# once the response code is in, training is done, status should return 0 again
assert results.get("train_response_code") == 200
training_finished = True
status_resp = requests.get("http://localhost:5005/status")
assert status_resp.json()["num_active_training_jobs"] == 0
p0.kill()
p1.join()
|
def test_train_status():
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
payload["force"] = True
ctx = multiprocessing.get_context("spawn")
# run a rasa server in one process
server = ctx.Process(target=subprocess.run, args=(["rasa", "run", "--enable-api"],))
p0.start()
server_ready = False
# wait until server is up before sending train request and status test loop
while not server_ready:
try:
server_ready = (
requests.get("http://localhost:5005/status").status_code == 200
)
except requests.exceptions.ConnectionError:
pass
time.sleep(1)
# use another process to hit the first server with a training request
results = Manager().dict()
p1 = ctx.Process(target=_send_train_request, args=(results, payload))
p1.start()
training_started = False
training_finished = False
# use our current process to query the status endpoint while the training is running
while not training_finished:
time.sleep(0.5)
# hit status endpoint with short timeout to ensure training doesn't block
status_resp = requests.get("http://localhost:5005/status", timeout=1)
assert status_resp.status_code == 200
if not training_started:
# make sure that we don't fail because we got status before training updated number of jobs
training_started = status_resp.json()["num_active_training_jobs"] == 1
else:
if results.get("train_response_code") is None:
assert status_resp.json()["num_active_training_jobs"] == 1
else:
# once the response code is in, training is done, status should return 0 again
assert results.get("train_response_code") == 200
training_finished = True
status_resp = requests.get("http://localhost:5005/status")
assert status_resp.json()["num_active_training_jobs"] == 0
p0.kill()
p1.join()
|
9,826 |
def add_initiator(module, array):
""""Add a host FC initiator."""
changed = False
wwn = validate_wwn(module, 'wwn is required for adding initiator.')
if module.check_mode:
module.exit_json(changed=changed)
try:
ini = array.add_initiator(
module.params['name'],
'Ansible FC initiator',
wwn)
if ini:
module.log(msg='Added initiator {0}'.format(ini['id']))
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Initiator {0} add failed.'.format(wwn))
module.exit_json(changed=changed)
|
def add_initiator(module, array):
""""Add a host FC initiator."""
changed = False
wwn = validate_wwn(module, 'wwn is required for adding initiator.')
if module.check_mode:
module.exit_json(changed=changed)
try:
ini = array.add_initiator(
module.params['name'],
'Ansible FC initiator',
wwn)
if ini:
module.log(msg='Added initiator {0}'.format(ini['id']))
changed = True
else:
raise Exception
except Exception:
module.fail_json(msg='Initiator {0} add failed.'.format(wwn))
module.exit_json(msg=msg, changed=changed)
|
40,077 |
def kitti_poses_and_timestamps_to_trajectory(poses_file, timestamp_file):
pose_path = file_interface.read_kitti_poses_file(poses_file)
raw_timestamps_mat = file_interface.csv_read_matrix(timestamp_file)
error_msg = ("timestamp file must have same row with KITTI poses file")
if len(raw_timestamps_mat) > 0 and len(raw_timestamps_mat[0]) != 1 and len(timestamps) != pose_path.num_poses:
raise file_interface.FileInterfaceException(error_msg)
try:
timestamps_mat = np.array(raw_timestamps_mat).astype(float)
except ValueError:
raise file_interface.FileInterfaceException(error_msg)
return PoseTrajectory3D(poses_se3=pose_path.poses_se3, timestamps=timestamps_mat)
|
def kitti_poses_and_timestamps_to_trajectory(poses_file, timestamp_file):
pose_path = file_interface.read_kitti_poses_file(poses_file)
raw_timestamps_mat = file_interface.csv_read_matrix(timestamp_file)
error_msg = ("timestamp file must have same row with KITTI poses file")
if len(raw_timestamps_mat) > 0 and len(raw_timestamps_mat[0]) != 1 or len(raw_timestamps_mat) != pose_path.num_poses:
raise file_interface.FileInterfaceException(error_msg)
try:
timestamps_mat = np.array(raw_timestamps_mat).astype(float)
except ValueError:
raise file_interface.FileInterfaceException(error_msg)
return PoseTrajectory3D(poses_se3=pose_path.poses_se3, timestamps=timestamps_mat)
|
14,012 |
def _get_C_info():
"""Information on system PROJ, GDAL, GEOS
Returns
-------
c_info: dict
system PROJ information
"""
try:
import pyproj
from pyproj.exceptions import DataDirError
proj = pyproj.proj_version_str
try:
proj_dir = pyproj.datadir.get_data_dir()
except DataDirError:
proj_dir = None
except ImportError:
proj = None
proj_dir = None
try:
import shapely._buildcfg
geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)
geos_dir = shapely._buildcfg.geos_library_path
except ImportError:
geos = None
geos_dir = None
try:
import fiona
gdal = fiona.env.get_gdal_release_name()
gdal_dir = fiona.env.GDALDataFinder().search()
except ImportError:
gdal = None
gdal_dir = None
blob = [
("GEOS", geos),
("GEOS lib", geos_dir),
("GDAL", gdal),
("GDAL dir", gdal_dir),
("PROJ", proj),
("PROJ dir", proj_dir)
]
return dict(blob)
|
def _get_C_info():
"""Information on system PROJ, GDAL, GEOS
Returns
-------
c_info: dict
system PROJ information
"""
try:
import pyproj
from pyproj.exceptions import DataDirError
proj = pyproj.proj_version_str
try:
proj_dir = pyproj.datadir.get_data_dir()
except DataDirError:
proj_dir = None
except ImportError:
proj = None
proj_dir = None
try:
import shapely._buildcfg
geos = '{}.{}.{}'.format(*shapely._buildcfg.geos_version)
geos_dir = shapely._buildcfg.geos_library_path
except ImportError:
geos = None
geos_dir = None
try:
import fiona
gdal = fiona.env.get_gdal_release_name()
gdal_dir = fiona.env.GDALDataFinder().search()
except ImportError:
gdal = None
gdal_dir = None
blob = [
("GEOS", geos),
("GEOS lib", geos_dir),
("GDAL", gdal),
("GDAL dir", gdal_dir),
("PROJ", proj),
("PROJ data dir", proj_dir)
]
return dict(blob)
|
13,699 |
def plugin_settings(settings):
"""Settings for the instructor plugin."""
### Analytics Dashboard (Insights) settings
settings.ANALYTICS_DASHBOARD_URL = ""
settings.ANALYTICS_DASHBOARD_NAME = _('Your Platform Insights')
settings.FEATURES.update({
# .. toggle_name: FEATURES['DISPLAY_ANALYTICS_ENROLLMENTS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: True
# .. toggle_description: Enable display of enrollment counts in instructor dash, analytics section.
# .. toggle_use_cases: opt_out
# .. toggle_creation_date: 2014-11-12
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/5838
'DISPLAY_ANALYTICS_ENROLLMENTS': True,
# .. toggle_name: FEATURES['ENABLE_CCX_ANALYTICS_DASHBOARD_URL']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Display the 'Analytics' tab in the instructor dashboard for CCX courses.
# Note: This has no effect unless ANALYTICS_DASHBOARD_URL is already set, because without that
# setting, the tab does not show up for any courses.
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2016-10-07
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/13196
'ENABLE_CCX_ANALYTICS_DASHBOARD_URL': False,
# .. toggle_name: FEATURES['MAX_ENROLLMENT_INSTR_BUTTONS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: 200
# .. toggle_description: Disable instructor dashboard buttons for downloading course data
# when enrollment exceeds this number. The number indicates the maximum allowed enrollments
# until a course considered "small". Courses exceeding the upper limit of "small" courses
# will have disabled buttons at the instructor dashboard.
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2020-07-06
# .. toggle_tickets: https://openedx.atlassian.net/browse/PROD-1740
'MAX_ENROLLMENT_INSTR_BUTTONS': 200,
# .. toggle_name: FEATURES['ENABLE_GRADE_DOWNLOADS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Grade calculation started from the instructor dashboard will write
# grades CSV files to the configured storage backend and give links for downloads.
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2016-07-06
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/11286
'ENABLE_GRADE_DOWNLOADS': False,
# .. toggle_name: FEATURES['ALLOW_COURSE_STAFF_GRADE_DOWNLOADS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Give course staff unrestricted access to grade downloads;
# if set to False, only edX superusers can perform the downloads.
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2018-03-26
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/17385
'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False,
# .. toggle_name: FEATURES['ALLOW_AUTOMATED_SIGNUPS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Show a section in the membership tab of the instructor dashboard
# to allow an upload of a CSV file that contains a list of new accounts to create and
# register for course.
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2014-10-21
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/5670
'ALLOW_AUTOMATED_SIGNUPS': False,
# .. toggle_name: FEATURES['CERTIFICATES_INSTRUCTOR_GENERATION']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Allow batch generation of certificates from the instructor dashboard.
# In case of self-paced courses, the certificate generation button is hidden if certificate
# generation is not explicitly enabled globally or for the specific corse.
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2015-07-17
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/8967
'CERTIFICATES_INSTRUCTOR_GENERATION': False,
# .. toggle_name: FEATURES['BATCH_ENROLLMENT_NOTIFY_USERS_DEFAULT']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Controls if the "Notify users by email" checkbox in the batch
# enrollment form on the instructor dashboard is already checked on page load or not.
# .. toggle_use_cases: opt_out
# .. toggle_creation_date: 2017-07-05
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/15392
'BATCH_ENROLLMENT_NOTIFY_USERS_DEFAULT': True,
})
|
def plugin_settings(settings):
"""Settings for the instructor plugin."""
### Analytics Dashboard (Insights) settings
settings.ANALYTICS_DASHBOARD_URL = ""
settings.ANALYTICS_DASHBOARD_NAME = _('Your Platform Insights')
settings.FEATURES.update({
# .. toggle_name: FEATURES['DISPLAY_ANALYTICS_ENROLLMENTS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: True
# .. toggle_description: Enable display of enrollment counts in instructor dash, analytics section.
# .. toggle_use_cases: opt_out
# .. toggle_creation_date: 2014-11-12
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/5838
'DISPLAY_ANALYTICS_ENROLLMENTS': True,
# .. toggle_name: FEATURES['ENABLE_CCX_ANALYTICS_DASHBOARD_URL']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Display the 'Analytics' tab in the instructor dashboard for CCX courses.
# Note: This has no effect unless ANALYTICS_DASHBOARD_URL is already set, because without that
# setting, the tab does not show up for any courses.
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2016-10-07
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/13196
'ENABLE_CCX_ANALYTICS_DASHBOARD_URL': False,
# .. toggle_name: FEATURES['MAX_ENROLLMENT_INSTR_BUTTONS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: 200
# .. toggle_description: Disable instructor dashboard buttons for downloading course data
# when enrollment exceeds this number. The number indicates the maximum allowed enrollments
# for the course to be considered "small". Courses exceeding the upper limit of "small" courses
# will have disabled buttons at the instructor dashboard.
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2020-07-06
# .. toggle_tickets: https://openedx.atlassian.net/browse/PROD-1740
'MAX_ENROLLMENT_INSTR_BUTTONS': 200,
# .. toggle_name: FEATURES['ENABLE_GRADE_DOWNLOADS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Grade calculation started from the instructor dashboard will write
# grades CSV files to the configured storage backend and give links for downloads.
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2016-07-06
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/11286
'ENABLE_GRADE_DOWNLOADS': False,
# .. toggle_name: FEATURES['ALLOW_COURSE_STAFF_GRADE_DOWNLOADS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Give course staff unrestricted access to grade downloads;
# if set to False, only edX superusers can perform the downloads.
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2018-03-26
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/17385
'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False,
# .. toggle_name: FEATURES['ALLOW_AUTOMATED_SIGNUPS']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Show a section in the membership tab of the instructor dashboard
# to allow an upload of a CSV file that contains a list of new accounts to create and
# register for course.
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2014-10-21
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/5670
'ALLOW_AUTOMATED_SIGNUPS': False,
# .. toggle_name: FEATURES['CERTIFICATES_INSTRUCTOR_GENERATION']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Allow batch generation of certificates from the instructor dashboard.
# In case of self-paced courses, the certificate generation button is hidden if certificate
# generation is not explicitly enabled globally or for the specific corse.
# .. toggle_use_cases: opt_in
# .. toggle_creation_date: 2015-07-17
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/8967
'CERTIFICATES_INSTRUCTOR_GENERATION': False,
# .. toggle_name: FEATURES['BATCH_ENROLLMENT_NOTIFY_USERS_DEFAULT']
# .. toggle_implementation: DjangoSetting
# .. toggle_default: False
# .. toggle_description: Controls if the "Notify users by email" checkbox in the batch
# enrollment form on the instructor dashboard is already checked on page load or not.
# .. toggle_use_cases: opt_out
# .. toggle_creation_date: 2017-07-05
# .. toggle_tickets: https://github.com/edx/edx-platform/pull/15392
'BATCH_ENROLLMENT_NOTIFY_USERS_DEFAULT': True,
})
|
52,221 |
def resolve_dependent_value(value):
"""Resolves parameter dependencies on the supplied value
Resolves parameter values, Parameterized instance methods and
parameterized functions with dependencies on the supplied value.
Args:
value: A value which will be resolved
Returns:
A new dictionary where any parameter dependencies have been
resolved.
"""
range_widget = False
if isinstance(value, (tuple, list)):
value = [resolve_dependent_value(v) for v in value]
if 'panel' in sys.modules:
from panel.widgets import RangeSlider, Widget
range_widget = isinstance(value, RangeSlider)
try:
from panel.depends import param_value_if_widget
value = param_value_if_widget(value)
except Exception:
if isinstance(value, Widget):
value = value.param.value
if is_param_method(value, has_deps=True):
value = value()
elif isinstance(value, param.Parameter) and isinstance(value.owner, param.Parameterized):
value = getattr(value.owner, value.name)
elif isinstance(value, FunctionType) and hasattr(value, '_dinfo'):
deps = value._dinfo
args = (getattr(p.owner, p.name) for p in deps.get('dependencies', []))
kwargs = {k: getattr(p.owner, p.name) for k, p in deps.get('kw', {}).items()}
value = value(*args, **kwargs)
if isinstance(value, tuple) and range_widget:
value = slice(*value)
return value
|
def resolve_dependent_value(value):
"""Resolves parameter dependencies on the supplied value
Resolves parameter values, Parameterized instance methods and
parameterized functions with dependencies on the supplied value.
Args:
value: A value which will be resolved
Returns:
A new dictionary where any parameter dependencies have been
resolved.
"""
range_widget = False
if isinstance(value, list):
value = [resolve_dependent_value(v) for v in value]
elif isinstance(value, tuple):
value = tuple([resolve_dependent_value(v) for v in value])
from panel.widgets import RangeSlider, Widget
range_widget = isinstance(value, RangeSlider)
try:
from panel.depends import param_value_if_widget
value = param_value_if_widget(value)
except Exception:
if isinstance(value, Widget):
value = value.param.value
if is_param_method(value, has_deps=True):
value = value()
elif isinstance(value, param.Parameter) and isinstance(value.owner, param.Parameterized):
value = getattr(value.owner, value.name)
elif isinstance(value, FunctionType) and hasattr(value, '_dinfo'):
deps = value._dinfo
args = (getattr(p.owner, p.name) for p in deps.get('dependencies', []))
kwargs = {k: getattr(p.owner, p.name) for k, p in deps.get('kw', {}).items()}
value = value(*args, **kwargs)
if isinstance(value, tuple) and range_widget:
value = slice(*value)
return value
|
28,083 |
def test_detection_with_external_license_directories():
test_dir = test_env.get_test_loc('plugin_license/external_licenses/scan', copy=True)
external_license_dir = f"{os.path.join(os.path.dirname(__file__), 'data')}/example_external_licenses/"
example1_dir = external_license_dir + 'example1'
result_file = test_env.get_temp_file('json')
args = [
'--license',
'--strip-root',
'--verbose',
'-dir', example1_dir,
'--json', result_file,
test_dir,
]
run_scan_click(args)
test_loc = test_env.get_test_loc('plugin_license/external_licenses/scan.expected.json')
check_json_scan(test_loc, result_file, regen=REGEN_TEST_FIXTURES)
|
def test_detection_with_external_license_directories():
test_dir = test_env.get_test_loc('plugin_license/external_licenses/scan', copy=True)
external_license_dir = test_env.get_test_loc('example_external_licenses/example1')
example1_dir = external_license_dir + 'example1'
result_file = test_env.get_temp_file('json')
args = [
'--license',
'--strip-root',
'--verbose',
'-dir', example1_dir,
'--json', result_file,
test_dir,
]
run_scan_click(args)
test_loc = test_env.get_test_loc('plugin_license/external_licenses/scan.expected.json')
check_json_scan(test_loc, result_file, regen=REGEN_TEST_FIXTURES)
|
21,982 |
def validateSessionWithToken(sydent, sid, clientSecret, token):
"""
Attempt to validate a session, identified by the sid, using
the token from out-of-band. The client secret is given to
prevent attempts to guess the token for a sid.
:param sid: The ID of the session to validate.
:type sid: unicode
:param clientSecret: The client secret to validate.
:type clientSecret: unicode
:param token: The token to validate.
:type token: unicode
:return: A dict with a "success" key which is True if the session
was successfully validated, False otherwise.
:rtype: dict[str, bool]
:raise IncorrectClientSecretException: The provided client_secret is incorrect.
:raise SessionExpiredException: The session has expired.
:raise InvalidSessionIdException: The session ID couldn't be matched with an
existing session.
:raise IncorrectSessionTokenException: The provided token is incorrect
"""
valSessionStore = ThreePidValSessionStore(sydent)
s = valSessionStore.getTokenSessionById(sid)
if not s:
logger.info("Session ID %s not found", (sid,))
raise InvalidSessionIdException()
if not clientSecret == s.clientSecret:
logger.info("Incorrect client secret", (sid,))
raise IncorrectClientSecretException()
if s.mtime + ValidationSession.THREEPID_SESSION_VALIDATION_TIMEOUT_MS < time_msec():
logger.info("Session expired")
raise SessionExpiredException()
# TODO once we can validate the token oob
#if tokenObj.validated and clientSecret == tokenObj.clientSecret:
# return True
if s.token == token:
logger.info("Setting session %s as validated", (s.id))
valSessionStore.setValidated(s.id, True)
return {'success': True}
else:
logger.info("Incorrect token submitted")
return IncorrectSessionTokenException()
|
def validateSessionWithToken(sydent, sid, clientSecret, token):
"""
Attempt to validate a session, identified by the sid, using
the token from out-of-band. The client secret is given to
prevent attempts to guess the token for a sid.
:param sid: The ID of the session to validate.
:type sid: unicode
:param clientSecret: The client secret to validate.
:type clientSecret: unicode
:param token: The token to validate.
:type token: unicode
:return: A dict with a "success" key which is True if the session
was successfully validated, False otherwise.
:rtype: dict[str, bool]
:raise IncorrectClientSecretException: The provided client_secret is incorrect.
:raise SessionExpiredException: The session has expired.
:raise InvalidSessionIdException: The session ID couldn't be matched with an
existing session.
:raise IncorrectSessionTokenException: The provided token is incorrect
"""
valSessionStore = ThreePidValSessionStore(sydent)
s = valSessionStore.getTokenSessionById(sid)
if not s:
logger.info("Session ID %s not found", sid)
raise InvalidSessionIdException()
if not clientSecret == s.clientSecret:
logger.info("Incorrect client secret", (sid,))
raise IncorrectClientSecretException()
if s.mtime + ValidationSession.THREEPID_SESSION_VALIDATION_TIMEOUT_MS < time_msec():
logger.info("Session expired")
raise SessionExpiredException()
# TODO once we can validate the token oob
#if tokenObj.validated and clientSecret == tokenObj.clientSecret:
# return True
if s.token == token:
logger.info("Setting session %s as validated", (s.id))
valSessionStore.setValidated(s.id, True)
return {'success': True}
else:
logger.info("Incorrect token submitted")
return IncorrectSessionTokenException()
|
45,499 |
def get_final_config(
*,
user_yaml: Optional[Union[dict, Path]] = None,
cli_config: Optional[dict] = None,
sourcedir: Optional[Path] = None,
validate: bool = True,
raise_on_invalid: bool = False,
use_external_toc: bool = True,
):
"""Create the final configuration dictionary, to parser to sphinx
:param user_config_path: A path to a YAML file written by the user
:param cli_config: Configuration coming directly from the CLI
:param sourcedir: path to source directory.
If it contains a `_static` folder, we ad that to the final `html_static_path`
:param validate: Validate user yaml against the data schema
:param raise_on_invalid: Raise a ValidationError, or only log a warning
Order of precedence is:
1. CLI Sphinx Configuration
2. User JB(YAML) Configuration
3. Default JB (YAML) Configuration
4. Default Sphinx Configuration
"""
# get the default sphinx configuration
sphinx_config = get_default_sphinx_config()
# get the default yaml configuration
yaml_config, default_yaml_update, add_paths = yaml_to_sphinx(
yaml.safe_load(PATH_YAML_DEFAULT.read_text(encoding="utf8"))
)
yaml_config.update(default_yaml_update)
# if available, get the user defined configuration
user_yaml_recurse, user_yaml_update = {}, {}
user_yaml_path = None
if user_yaml:
if isinstance(user_yaml, Path):
user_yaml_path = user_yaml
user_yaml = yaml.safe_load(user_yaml.read_text(encoding="utf8"))
else:
user_yaml = user_yaml
if validate:
validate_yaml(user_yaml, raise_on_errors=raise_on_invalid)
user_yaml_recurse, user_yaml_update, add_paths = yaml_to_sphinx(user_yaml)
# add paths from yaml config
if user_yaml_path:
for path in add_paths:
path = (user_yaml_path.parent / path).resolve()
sys.path.append(path.as_posix())
# first merge the user yaml into the default yaml
_recursive_update(yaml_config, user_yaml_recurse)
# then merge this into the default sphinx config
_recursive_update(sphinx_config, yaml_config)
# TODO: deprecate this in version 0.14
# Check user specified mathjax_config for sphinx >= 4
# https://github.com/executablebooks/jupyter-book/issues/1502
if sphinx.version_info[0] >= 4 and "mathjax_config" in user_yaml_update:
# Switch off warning if user has specified mathjax v2
if (
"mathjax_path" in user_yaml_update
and "@2" in user_yaml_update["mathjax_path"]
):
# use mathjax2_config so not to tigger deprecation warning in future
user_yaml_update["mathjax2_config"] = user_yaml_update.pop("mathjax_config")
else:
_message_box(
(
f"[Warning] Mathjax configuration has changed for sphinx>=4.0 [Using sphinx: {sphinx.__version__}]\n" # noqa: E501
"Your _config.yml needs to be updated:\n" # noqa: E501
"mathjax_config -> mathjax3_config\n" # noqa: E501
"To continue using `mathjax v2` you will need to use the `mathjax_path` configuration\n" # noqa: E501
"\n"
"See Sphinx Documentation:\n"
"https://www.sphinx-doc.org/en/master/usage/extensions/math.html#module-sphinx.ext.mathjax" # noqa: E501
),
color="orange",
print_func=print,
)
# Automatically make the configuration name substitution so older projects build
user_yaml_update["mathjax3_config"] = user_yaml_update.pop("mathjax_config")
# Recursively update sphinx config if option is specified,
# otherwise forcefully override options non-recursively
if sphinx_config.pop("recursive_update"):
_recursive_update(sphinx_config, user_yaml_update)
else:
sphinx_config.update(user_yaml_update)
# This is to deal with a special case, where the override needs to be applied after
# the sphinx app is initialised (since the default is a function)
# TODO I'm not sure if there is a better way to deal with this?
config_meta = {
"latex_doc_overrides": sphinx_config.pop("latex_doc_overrides"),
"latex_individualpages": cli_config.pop("latex_individualpages"),
}
if sphinx_config.get("use_jupyterbook_latex"):
sphinx_config["extensions"].append("sphinx_jupyterbook_latex")
# finally merge in CLI configuration
_recursive_update(sphinx_config, cli_config or {})
# Add the `_static` folder to html_static_path, only if it exists
if sourcedir and Path(sourcedir).joinpath("_static").is_dir():
paths_static = sphinx_config.get("html_static_path", [])
paths_static.append("_static")
sphinx_config["html_static_path"] = paths_static
if not use_external_toc:
# TODO perhaps a better logic for this?
# remove all configuration related to sphinx_external_toc
try:
idx = sphinx_config["extensions"].index("sphinx_external_toc")
except ValueError:
pass
else:
sphinx_config["extensions"].pop(idx)
sphinx_config.pop("external_toc_path", None)
sphinx_config.pop("external_toc_exclude_missing", None)
return sphinx_config, config_meta
|
def get_final_config(
*,
user_yaml: Optional[Union[dict, Path]] = None,
cli_config: Optional[dict] = None,
sourcedir: Optional[Path] = None,
validate: bool = True,
raise_on_invalid: bool = False,
use_external_toc: bool = True,
):
"""Create the final configuration dictionary, to parser to sphinx
:param user_config_path: A path to a YAML file written by the user
:param cli_config: Configuration coming directly from the CLI
:param sourcedir: path to source directory.
If it contains a `_static` folder, we ad that to the final `html_static_path`
:param validate: Validate user yaml against the data schema
:param raise_on_invalid: Raise a ValidationError, or only log a warning
Order of precedence is:
1. CLI Sphinx Configuration
2. User JB(YAML) Configuration
3. Default JB (YAML) Configuration
4. Default Sphinx Configuration
"""
# get the default sphinx configuration
sphinx_config = get_default_sphinx_config()
# get the default yaml configuration
yaml_config, default_yaml_update, add_paths = yaml_to_sphinx(
yaml.safe_load(PATH_YAML_DEFAULT.read_text(encoding="utf8"))
)
yaml_config.update(default_yaml_update)
# if available, get the user defined configuration
user_yaml_recurse, user_yaml_update = {}, {}
user_yaml_path = None
if user_yaml:
if isinstance(user_yaml, Path):
user_yaml_path = user_yaml
user_yaml = yaml.safe_load(user_yaml.read_text(encoding="utf8"))
else:
user_yaml = user_yaml
if validate:
validate_yaml(user_yaml, raise_on_errors=raise_on_invalid)
user_yaml_recurse, user_yaml_update, add_paths = yaml_to_sphinx(user_yaml)
# add paths from yaml config
if user_yaml_path:
for path in add_paths:
path = (user_yaml_path.parent / path).resolve()
sys.path.append(path.as_posix())
# first merge the user yaml into the default yaml
_recursive_update(yaml_config, user_yaml_recurse)
# then merge this into the default sphinx config
_recursive_update(sphinx_config, yaml_config)
# TODO: deprecate this in version 0.14
# Check user specified mathjax_config for sphinx >= 4
# https://github.com/executablebooks/jupyter-book/issues/1502
if sphinx.version_info[0] >= 4 and "mathjax_config" in user_yaml_update:
# Switch off warning if user has specified mathjax v2
if (
"mathjax_path" in user_yaml_update
and "@2" in user_yaml_update["mathjax_path"]
):
# use mathjax2_config so not to tigger deprecation warning in future
user_yaml_update["mathjax2_config"] = user_yaml_update.pop("mathjax_config")
else:
_message_box(
(
f"[Warning] Mathjax configuration has changed for sphinx>=4.0 [Using sphinx: {sphinx.__version__}]\n" # noqa: E501
"Your _config.yml needs to be updated:\n" # noqa: E501
"mathjax_config -> mathjax3_config\n" # noqa: E501
"To continue using `mathjax v2` you will need to use the `mathjax_path` configuration\n" # noqa: E501
"\n"
"See Sphinx Documentation:\n"
"https://www.sphinx-doc.org/en/master/usage/extensions/math.html#module-sphinx.ext.mathjax" # noqa: E501
),
color="orange",
print_func=print,
)
# Automatically make the configuration name substitution so older projects build
user_yaml_update["mathjax3_config"] = user_yaml_update.pop("mathjax_config")
# Recursively update sphinx config if option is specified,
# otherwise forcefully override options non-recursively
if sphinx_config.pop("recursive_update") is True:
_recursive_update(sphinx_config, user_yaml_update)
else:
sphinx_config.update(user_yaml_update)
# This is to deal with a special case, where the override needs to be applied after
# the sphinx app is initialised (since the default is a function)
# TODO I'm not sure if there is a better way to deal with this?
config_meta = {
"latex_doc_overrides": sphinx_config.pop("latex_doc_overrides"),
"latex_individualpages": cli_config.pop("latex_individualpages"),
}
if sphinx_config.get("use_jupyterbook_latex"):
sphinx_config["extensions"].append("sphinx_jupyterbook_latex")
# finally merge in CLI configuration
_recursive_update(sphinx_config, cli_config or {})
# Add the `_static` folder to html_static_path, only if it exists
if sourcedir and Path(sourcedir).joinpath("_static").is_dir():
paths_static = sphinx_config.get("html_static_path", [])
paths_static.append("_static")
sphinx_config["html_static_path"] = paths_static
if not use_external_toc:
# TODO perhaps a better logic for this?
# remove all configuration related to sphinx_external_toc
try:
idx = sphinx_config["extensions"].index("sphinx_external_toc")
except ValueError:
pass
else:
sphinx_config["extensions"].pop(idx)
sphinx_config.pop("external_toc_path", None)
sphinx_config.pop("external_toc_exclude_missing", None)
return sphinx_config, config_meta
|
10,367 |
def delegate_remote(args, exclude, require, integration_targets):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
:type integration_targets: tuple[IntegrationTarget]
"""
parts = args.remote.split('/', 1)
platform = parts[0]
version = parts[1]
core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
success = False
raw = False
if isinstance(args, ShellConfig):
use_httptester = args.httptester
raw = args.raw
else:
use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
if use_httptester and not docker_available():
display.warning('Assuming --disable-httptester since `docker` is not available.')
use_httptester = False
httptester_id = None
ssh_options = []
content_root = None
try:
core_ci.start()
if use_httptester:
httptester_id, ssh_options = start_httptester(args)
core_ci.wait()
python_version = get_python_version(args, get_remote_completion(), args.remote)
if platform == 'windows':
# Windows doesn't need the ansible-test fluff, just run the SSH command
manage = ManageWindowsCI(core_ci)
manage.setup(python_version)
cmd = ['powershell.exe']
elif raw:
manage = ManagePosixCI(core_ci)
manage.setup(python_version)
cmd = create_shell_command(['bash'])
else:
manage = ManagePosixCI(core_ci)
pwd = manage.setup(python_version)
options = {
'--remote': 1,
}
python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote)
install_root = os.path.join(pwd, 'ansible')
if data_context().content.collection:
content_root = os.path.join(install_root, data_context().content.collection.directory)
else:
content_root = install_root
cmd = generate_command(args, python_interpreter, os.path.join(install_root, 'bin'), content_root, options, exclude, require)
if httptester_id:
cmd += ['--inject-httptester']
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
cmd += ['--coverage-label', 'remote-%s-%s' % (platform, version)]
if isinstance(args, IntegrationConfig):
if not args.allow_destructive:
cmd.append('--allow-destructive')
# remote instances are only expected to have a single python version available
if isinstance(args, UnitsConfig) and not args.python:
cmd += ['--python', 'default']
if isinstance(args, IntegrationConfig):
cloud_platforms = get_cloud_providers(args)
for cloud_platform in cloud_platforms:
ssh_options += cloud_platform.get_remote_ssh_options()
try:
manage.ssh(cmd, ssh_options)
success = True
finally:
download = False
if platform != 'windows':
download = True
if isinstance(args, ShellConfig):
if args.raw:
download = False
if download and content_root:
local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
remote_results_root = os.path.join(content_root, data_context().content.results_path)
remote_results_name = os.path.basename(remote_results_root)
remote_temp_path = os.path.join('/tmp', remote_results_name)
# AIX cp and GNU cp provide different options, no way could be found to have a commen
# patttern and achieve the same goal
cp_opts = '-hr' if platform in ['aix', 'ibmi'] else '-a'
manage.ssh('rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root))
manage.download(remote_temp_path, local_test_root)
finally:
if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
core_ci.stop()
if httptester_id:
docker_rm(args, httptester_id)
|
def delegate_remote(args, exclude, require, integration_targets):
"""
:type args: EnvironmentConfig
:type exclude: list[str]
:type require: list[str]
:type integration_targets: tuple[IntegrationTarget]
"""
parts = args.remote.split('/', 1)
platform = parts[0]
version = parts[1]
core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
success = False
raw = False
if isinstance(args, ShellConfig):
use_httptester = args.httptester
raw = args.raw
else:
use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
if use_httptester and not docker_available():
display.warning('Assuming --disable-httptester since `docker` is not available.')
use_httptester = False
httptester_id = None
ssh_options = []
content_root = None
try:
core_ci.start()
if use_httptester:
httptester_id, ssh_options = start_httptester(args)
core_ci.wait()
python_version = get_python_version(args, get_remote_completion(), args.remote)
if platform == 'windows':
# Windows doesn't need the ansible-test fluff, just run the SSH command
manage = ManageWindowsCI(core_ci)
manage.setup(python_version)
cmd = ['powershell.exe']
elif raw:
manage = ManagePosixCI(core_ci)
manage.setup(python_version)
cmd = create_shell_command(['bash'])
else:
manage = ManagePosixCI(core_ci)
pwd = manage.setup(python_version)
options = {
'--remote': 1,
}
python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote)
install_root = os.path.join(pwd, 'ansible')
if data_context().content.collection:
content_root = os.path.join(install_root, data_context().content.collection.directory)
else:
content_root = install_root
cmd = generate_command(args, python_interpreter, os.path.join(install_root, 'bin'), content_root, options, exclude, require)
if httptester_id:
cmd += ['--inject-httptester']
if isinstance(args, TestConfig):
if args.coverage and not args.coverage_label:
cmd += ['--coverage-label', 'remote-%s-%s' % (platform, version)]
if isinstance(args, IntegrationConfig):
if not args.allow_destructive:
cmd.append('--allow-destructive')
# remote instances are only expected to have a single python version available
if isinstance(args, UnitsConfig) and not args.python:
cmd += ['--python', 'default']
if isinstance(args, IntegrationConfig):
cloud_platforms = get_cloud_providers(args)
for cloud_platform in cloud_platforms:
ssh_options += cloud_platform.get_remote_ssh_options()
try:
manage.ssh(cmd, ssh_options)
success = True
finally:
download = False
if platform != 'windows':
download = True
if isinstance(args, ShellConfig):
if args.raw:
download = False
if download and content_root:
local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
remote_results_root = os.path.join(content_root, data_context().content.results_path)
remote_results_name = os.path.basename(remote_results_root)
remote_temp_path = os.path.join('/tmp', remote_results_name)
# AIX cp and GNU cp provide different options, no way could be found to have a common
# patttern and achieve the same goal
cp_opts = '-hr' if platform in ['aix', 'ibmi'] else '-a'
manage.ssh('rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root))
manage.download(remote_temp_path, local_test_root)
finally:
if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
core_ci.stop()
if httptester_id:
docker_rm(args, httptester_id)
|
9,359 |
def test_wrap_var_dict_None():
assert not isinstance(wrap_var(dict(foo=None))['foo'], AnsibleUnsafe)
|
def test_wrap_var_dict_None():
assert wrap_var(dict(foo=None))['foo'] is None
|
34,368 |
def _get_requested_conversation_ids(
conversation_ids_arg: Optional[Text] = None,
) -> Optional[List[Text]]:
"""Get list of conversation IDs requested as a command-line argument.
Args:
conversation_ids_arg: Value of '--conversation-ids' command-line argument. If
provided, this is a string of comma-separated conversation IDs.
Return:
List of conversation IDs requested as a command-line argument. `None` if that
argument was left unspecified.
"""
if not conversation_ids_arg:
return None
return conversation_ids_arg.split(",")
|
def _get_requested_conversation_ids(
conversation_ids_arg: Optional[Text] = None,
) -> Optional[List[Text]]:
"""Get list of conversation IDs requested as a command-line argument.
Args:
conversation_ids_arg: Value of `--conversation-ids` command-line argument. If
provided, this is a string of comma-separated conversation IDs.
Return:
List of conversation IDs requested as a command-line argument. `None` if that
argument was left unspecified.
"""
if not conversation_ids_arg:
return None
return conversation_ids_arg.split(",")
|
58,555 |
def _get_wheel_name(minor_version_number):
if minor_version_number:
matches = [
file for file in glob.glob(
f"{_get_root_dir()}/.whl/ray-*{PYTHON_WHL_VERSION}"
f"{minor_version_number}*-manylinux*")
if "+" not in file # Exclude dbg, asan builds
]
assert len(matches) == 1, (
f"Found ({len(matches)}) matches for 'ray-*{PYTHON_WHL_VERSION}"
f"{minor_version_number}*-manylinux*' instead of 1."
f"wheel matches: {matches}")
return os.path.basename(matches[0])
else:
matches = glob.glob(
f"{_get_root_dir()}/.whl/*{PYTHON_WHL_VERSION}*-manylinux*")
return [os.path.basename(i) for i in matches]
|
def _get_wheel_name(minor_version_number):
if minor_version_number:
matches = [
file for file in glob.glob(
f"{_get_root_dir()}/.whl/ray-*{PYTHON_WHL_VERSION}"
f"{minor_version_number}*-manylinux*")
if "+" not in file # Exclude dbg, asan builds
]
assert len(matches) == 1, (
f"Found ({len(matches)}) matches for 'ray-*{PYTHON_WHL_VERSION}"
f"{minor_version_number}*-manylinux*' instead of 1.\n"
f"wheel matches: {matches}")
return os.path.basename(matches[0])
else:
matches = glob.glob(
f"{_get_root_dir()}/.whl/*{PYTHON_WHL_VERSION}*-manylinux*")
return [os.path.basename(i) for i in matches]
|
30,678 |
def set_docker_hardening_for_build(client, prints_manager):
"""Sets docker hardening configuration
Args:
client (demisto_client): The configured client to use.
prints_manager (ParallelPrintsManager): Print manager object
Returns:
None
"""
host = client.api_client.configuration.host
installed_content_message = \
'\nMaking "POST" request to server - "{}" to set docker hardening server configuration.'.format(host)
prints_manager.add_print_job(installed_content_message, print_color, 0, LOG_COLORS.GREEN)
# make request to update server configs
server_configuration = {
'docker.cpu.limit': '1.0',
'docker.run.internal.asuser': 'true',
'limit.docker.cpu': 'true',
'python.pass.extra.keys': '--memory=1g##--memory-swap=-1##--pids-limit=256##--ulimit=nofile=1024:8192'
}
error_msg = "Failed to set docker hardening server config - with status code "
response_data, status_code, _ = update_server_configuration(client, server_configuration, error_msg)
|
def set_docker_hardening_for_build(client, prints_manager):
"""Sets docker hardening configuration
Args:
client (demisto_client): The configured client to use.
prints_manager (ParallelPrintsManager): Print manager object
Returns:
None
"""
host = client.api_client.configuration.host
installed_content_message = \
'\nMaking "POST" request to server - "{}" to set docker hardening server configuration.'.format(host)
prints_manager.add_print_job(installed_content_message, print_color, 0, LOG_COLORS.GREEN)
# make request to update server configs
server_configuration = {
'docker.cpu.limit': '1.0',
'docker.run.internal.asuser': 'true',
'limit.docker.cpu': 'true',
'python.pass.extra.keys': '--memory=1g##--memory-swap=-1##--pids-limit=256##--ulimit=nofile=1024:8192'
}
error_msg = "Failed to set docker hardening server config - with status code "
return update_server_configuration(client, server_configuration, error_msg)
|
7,835 |
def test_plane():
s = openmc.Plane(a=1, b=2, c=-1, d=3, name='my plane')
assert s.a == 1
assert s.b == 2
assert s.c == -1
assert s.d == 3
assert s.boundary_type == 'transmission'
assert s.name == 'my plane'
assert s.type == 'plane'
# Generic planes don't have well-defined bounding boxes
assert_infinite_bb(s)
# evaluate method
x, y, z = (4, 3, 6)
assert s.evaluate((x, y, z)) == pytest.approx(s.a*x + s.b*y + s.c*z - s.d)
# translate method
st = s.translate((1.0, 0.0, 0.0))
assert (st.a, st.b, st.c, st.d) == (s.a, s.b, s.c, 4)
# rotate method
yp = openmc.YPlane(np.abs(s.d)/np.sqrt(s.a**2 + s.b**2 + s.c**2))
psi = np.rad2deg(np.arctan2(1, 2))
phi = np.rad2deg(np.arctan2(1, np.sqrt(5)))
sr = s.rotate((phi, 0., psi), order='zyx')
assert yp.normalize() == pytest.approx(sr.normalize())
# test rotation ordering
phi = np.rad2deg(np.arctan2(1, np.sqrt(2)))
sr = s.rotate((0., -45., phi), order='xyz')
assert yp.normalize() == pytest.approx(sr.normalize())
# Make sure repr works
repr(s)
|
def test_plane():
s = openmc.Plane(a=1, b=2, c=-1, d=3, name='my plane')
assert s.a == 1
assert s.b == 2
assert s.c == -1
assert s.d == 3
assert s.boundary_type == 'transmission'
assert s.name == 'my plane'
assert s.type == 'plane'
# Generic planes don't have well-defined bounding boxes
assert_infinite_bb(s)
# evaluate method
x, y, z = (4, 3, 6)
assert s.evaluate((x, y, z)) == pytest.approx(s.a*x + s.b*y + s.c*z - s.d)
# translate method
st = s.translate((1.0, 0.0, 0.0))
assert (st.a, st.b, st.c, st.d) == (s.a, s.b, s.c, 4)
# rotate method
yp = openmc.YPlane(np.abs(s.d)/np.sqrt(s.a**2 + s.b**2 + s.c**2))
psi = np.rad2deg(np.arctan2(1, 2))
phi = np.rad2deg(np.arctan2(1, np.sqrt(5)))
sr = s.rotate((phi, 0., psi), order='zyx')
assert yp.normalize() == pytest.approx(sr.normalize())
# test rotation ordering
phi = math.degrees(math.atan2(1, math.sqrt(2)))
sr = s.rotate((0., -45., phi), order='xyz')
assert yp.normalize() == pytest.approx(sr.normalize())
# Make sure repr works
repr(s)
|
37,036 |
def job_monitor(job, interval=None, monitor_async=False, quiet=False, to_file=None):
"""Monitor the status of a IBMQJob instance.
Args:
job (BaseJob): Job to monitor.
interval (int): Time interval between status queries.
monitor_async (bool): Monitor asyncronously (in Jupyter only).
quiet (bool): If True, do not print status messages.
to_file (file): If file print status messages to it, else to stdout.
Raises:
QiskitError: When trying to run async outside of Jupyter
ImportError: ipywidgets not available for notebook.
"""
if interval is None:
_interval_set = False
interval = 2
else:
_interval_set = True
if _NOTEBOOK_ENV:
if monitor_async:
try:
import ipywidgets as widgets # pylint: disable=import-error
except ImportError:
raise ImportError('These functions need ipywidgets. '
'Run "pip install ipywidgets" before.')
from qiskit.tools.jupyter.jupyter_magics import _html_checker # pylint: disable=C0412
style = "font-size:16px;"
header = "<p style='{style}'>Job Status: %s </p>".format(
style=style)
status = widgets.HTML(value=header % job.status().value)
display(status)
thread = threading.Thread(target=_html_checker, args=(job, interval,
status, header))
thread.start()
else:
_text_checker(job, interval, _interval_set,
quiet=quiet, to_file=to_file)
else:
if monitor_async:
raise QiskitError(
'monitor_async only available in Jupyter notebooks.')
_text_checker(job, interval, _interval_set, quiet=quiet, to_file=to_file)
|
def job_monitor(job, interval=None, monitor_async=False, quiet=False, output=sys.stdout):
"""Monitor the status of a IBMQJob instance.
Args:
job (BaseJob): Job to monitor.
interval (int): Time interval between status queries.
monitor_async (bool): Monitor asyncronously (in Jupyter only).
quiet (bool): If True, do not print status messages.
to_file (file): If file print status messages to it, else to stdout.
Raises:
QiskitError: When trying to run async outside of Jupyter
ImportError: ipywidgets not available for notebook.
"""
if interval is None:
_interval_set = False
interval = 2
else:
_interval_set = True
if _NOTEBOOK_ENV:
if monitor_async:
try:
import ipywidgets as widgets # pylint: disable=import-error
except ImportError:
raise ImportError('These functions need ipywidgets. '
'Run "pip install ipywidgets" before.')
from qiskit.tools.jupyter.jupyter_magics import _html_checker # pylint: disable=C0412
style = "font-size:16px;"
header = "<p style='{style}'>Job Status: %s </p>".format(
style=style)
status = widgets.HTML(value=header % job.status().value)
display(status)
thread = threading.Thread(target=_html_checker, args=(job, interval,
status, header))
thread.start()
else:
_text_checker(job, interval, _interval_set,
quiet=quiet, to_file=to_file)
else:
if monitor_async:
raise QiskitError(
'monitor_async only available in Jupyter notebooks.')
_text_checker(job, interval, _interval_set, quiet=quiet, to_file=to_file)
|
32,392 |
def main():
params = demisto.params()
access_token = params.get('access_token')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
email = params.get('email')
command = demisto.command()
commands = {
'drift-post-contact': post_contact_command,
'drift-get-contact': get_contact_command,
'drift-update-contact': patch_contact_command,
'drift-delete-contact': delete_contact_command
}
LOG(f'Command being called is {command}')
try:
client = Client(
API_ENDPOINT,
access_token,
verify_certificate,
proxy,
email
)
args = demisto.args()
if command == 'test-module':
test_module(client)
elif command in commands:
commands[command](client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {command} command. Error: {str(e)}')
|
def main():
params = demisto.params()
access_token = params.get('access_token')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
email = params.get('email')
command = demisto.command()
commands = {
'drift-post-contact': post_contact_command,
'drift-get-contact': get_contact_command,
'drift-update-contact': patch_contact_command,
'drift-delete-contact': delete_contact_command
}
demisto.debug(f'Command being called is {command}')
try:
client = Client(
API_ENDPOINT,
access_token,
verify_certificate,
proxy,
email
)
args = demisto.args()
if command == 'test-module':
test_module(client)
elif command in commands:
commands[command](client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {command} command. Error: {str(e)}')
|
54,233 |
def assert_optimizes(
before: cirq.Circuit,
expected: cirq.Circuit,
additional_gates: Optional[Sequence[Type[cirq.Gate]]] = None,
):
if additional_gates is None:
gateset = cirq.CZTargetGateset()
else:
gateset = cirq.CZTargetGateset(additional_gates=additional_gates)
cirq.testing.assert_same_circuits(
cirq.optimize_for_target_gateset(before, gateset=gateset, ignore_failures=False), expected
)
|
def assert_optimizes(
before: cirq.Circuit,
expected: cirq.Circuit,
additional_gates: Sequence[Type[cirq.Gate]] = (cirq.GlobalPhaseGate,),
):
gateset = cirq.CZTargetGateset(additional_gates=additional_gates)
cirq.testing.assert_same_circuits(
cirq.optimize_for_target_gateset(before, gateset=gateset, ignore_failures=False), expected
)
|
32,215 |
def jq_wrap(json_str, query):
if type(json_str) == str:
j = json.loads(json_str)
else:
j = json_str
res = pyjq.all(query, j)
try:
demisto.executeCommand('Set', {'key': 'jq.result', 'value': res})
except:
pass # ignore issue when can't set context - script executed as transform script
return_results(res)
|
def jq_wrap(json_str, query):
j = json.loads(json_str)
res = pyjq.all(query, j)
try:
demisto.executeCommand('Set', {'key': 'jq.result', 'value': res})
except:
pass # ignore issue when can't set context - script executed as transform script
return_results(res)
|
32,610 |
def branch_create_command(client: Client, args: Dict) -> CommandResults:
repo = args.get('repo', None)
name = args.get('name', None)
target_branch = args.get('target_branch', None)
if not repo:
repo = client.repository
response = client.branch_create_request(name, target_branch, repo)
return CommandResults(
readable_output=f'The branch {name} was created successfully.',
outputs_prefix='Bitbucket.Branch',
outputs=response,
raw_response=response
)
|
def branch_create_command(client: Client, args: Dict) -> CommandResults:
repo = args.get('repo', None)
name = args.get('name', None)
target_branch = args.get('target_branch', None)
if not repo:
repo = client.repository
response = client.branch_create_request(name, target_branch, repo)
return CommandResults(
readable_output=f'The branch "{name}" was created successfully.',
outputs_prefix='Bitbucket.Branch',
outputs=response,
raw_response=response
)
|
49,099 |
def test_unevaluated_relationals():
from sympy.parsing.latex import parse_latex
from sympy.core.relational import Eq, Ne
op_ans = {
'=': 'Eq(2*x, x + x)',
'\\leq': '2*x <= x + x',
'\\geq': '2*x >= x + x',
'<': '2*x < x + x',
'>': '2*x > x + x',
'\\neq': 'Ne(2*x, x + x)'}
for o, a in op_ans.items():
assert str(parse_latex('2*x %s x+x' % o)) == a
|
def test_unevaluated_relationals():
from sympy.parsing.latex import parse_latex
op_ans = {
'=': 'Eq(2*x, x + x)',
'\\leq': '2*x <= x + x',
'\\geq': '2*x >= x + x',
'<': '2*x < x + x',
'>': '2*x > x + x',
'\\neq': 'Ne(2*x, x + x)'}
for op, ans in op_ans.items():
assert str(parse_latex('2*x %s x+x' % op)) == ans
|
23,592 |
def depo_velocity(T, WindSpeed, LUC):
# convert temperature into Kelvin
T = T + 273.15
# save wind data
if(np.isscalar(WindSpeed)):
u = np.array([WindSpeed])
else:
u = WindSpeed
g = 9.81 # gravity in m/s^2
# Na = 6.022 * 10**23 # avagadros number
R = 8.314 # Universal gas consant in m3Pa/Kmol
k = 1.38 * 10**-23 # Boltzmann's constant in m^2kg/sK
P = 101300 # pressure in Pa
rhoair = 1.2041 # density of air in kg/m3
z0 = 1
rhop = 1500 # Assume density of particle in kg/m^3
switcher = {
1: 0.56,
4: 0.56,
6: 0.54,
8: 0.54,
10: 0.54,
}
try:
gamma = switcher[LUC]
except Exception as e:
warnings.warn("Unknown Land Use Category, assuming LUC 8. "+str(e))
LUC = 8
gamma = switcher[LUC]
# Diameter of particle in um
Dpum = np.array([2.5, 10])
Dpm = Dpum*10**-6 # Diameter of particle in m
# Calculations
mu = 1.8*10**-5*(T/298)**0.85 # viscosity of air in kg/m s
nu = mu/rhoair
lambda1 = 2*mu/(P*(8.*0.0288/(np.pi*R*T))**(0.5)) # mean free path
ll = np.array([lambda1, lambda1])
Cc = 1+2*ll/Dpm*(1.257+0.4*np.exp(-1.1*Dpm/(ll*2)))
# slip correction coefficient
# Calculate vs
vs = rhop*Dpm**2*(g*Cc/(mu*18)) # particle settling velocity
# Calculate rb
ustar = np.zeros_like(u, dtype=float) # pre-allocate ustar
# Equation 11.66 in Ramaswami (and 16.67 and Sienfeld &Pandis)
ustar[u > 0] = 0.4 * u[u > 0]/np.log(10/z0)
ustar[u <= 0] = 0.001
D = k*T*(Cc/(3*np.pi*mu*Dpm))
Sc = nu/D
# gamma=0.56 # for urban
# alpha=1.5 # for urban
EB = Sc**(-1 * gamma)
St = vs*(ustar**2)/(g*nu)
EIM = 10.0**(-3.0/St) # For smooth surfaces
# EIM =((St)./(0.82+St)).^2
R1 = np.exp(-St**(0.5)) # percentage of particles that stick
rb = 1/(3*(EB+EIM)*ustar*R1)
# Calculate ra
a = np.array([-0.096, -0.037, -0.002, 0, 0.004, 0.035])
b = np.array([0.029, 0.029, 0.018, 0, -0.018, -0.036])
# For wind speeds <= 3, use a = -0.037 and b = 0.029
# For wind speeds >3 and <=5, use a = -.002, b = 0.018
# For wind speeds > 5, use a = 0, b = 0
avals = a[1]*np.ones_like(u, dtype=float)
avals[u > 3] = a[2]
avals[u > 5] = a[3]
bvals = b[1]*np.ones_like(u, dtype=float)
bvals[u > 3] = b[2]
bvals[u > 5] = b[3]
L = 1/(avals + bvals*np.log(z0))
zeta0 = z0/L
zeta = 10.0/L
eta = ((1-15*zeta)**(0.25))
eta0 = ((1-15*zeta0)**(0.25))
ra = np.zeros_like(zeta, dtype=float) # Preallocate memory
ra[zeta == 0] = (1 / (0.4 * ustar[zeta == 0])) * np.log(10.0 / z0)
ra[zeta > 0] = (1 / (0.4 * ustar[zeta > 0]))*(np.log(10.0/z0)
+ 4.7*(zeta[zeta > 0] - zeta0[zeta > 0]))
ra[zeta < 0] = (1 / (0.4 * ustar[zeta < 0])) * (np.log(10.0 / z0)
+ np.log((eta0[zeta < 0]**2 + 1) * (eta0[zeta < 0]+1)**2
/ ((eta[zeta < 0]**2 + 1) * (eta[zeta < 0]+1)**2))
+ 2*(np.arctan(eta[zeta < 0])-np.arctan(eta0[zeta < 0])))
# Calculate vd and mass flux
vd = 1/(ra+rb)+vs
return vd
|
def depo_velocity(T, WindSpeed, LUC):
# convert temperature into Kelvin
T = T + 273.15
# save wind data
if(np.isscalar(WindSpeed)):
u = np.array([WindSpeed])
else:
u = WindSpeed
g = 9.81 # gravity in m/s^2
# Na = 6.022 * 10**23 # avagadros number
R = 8.314 # Universal gas consant in m3Pa/Kmol
k = 1.38 * 10**-23 # Boltzmann's constant in m^2kg/sK
P = 101300 # pressure in Pa
rhoair = 1.2041 # density of air in kg/m3
z0 = 1
rhop = 1500 # Assume density of particle in kg/m^3
switcher = {
1: 0.56,
4: 0.56,
6: 0.54,
8: 0.54,
10: 0.54,
}
try:
gamma = switcher[LUC]
except Exception as e:
warnings.warn("Unknown Land Use Category, assuming LUC 8. "+str(e))
LUC = 8
gamma = switcher[LUC]
# Diameter of particle in um
Dpum = np.array([2.5, 10])
Dpm = Dpum*10**-6 # Diameter of particle in m
# Calculations
mu = 1.8*10**-5*(T/298)**0.85 # viscosity of air in kg/m s
nu = mu/rhoair
lambda1 = 2*mu/(P*(8.*0.0288/(np.pi*R*T))**(0.5)) # mean free path
ll = np.array([lambda1, lambda1])
Cc = 1+2*ll/Dpm*(1.257+0.4*np.exp(-1.1*Dpm/(ll*2)))
# slip correction coefficient
# Calculate vs
vs = rhop*Dpm**2*(g*Cc/(mu*18)) # particle settling velocity
# Calculate rb
ustar = np.zeros_like(u, dtype=float) # pre-allocate ustar
# Equation 11.66 in Ramaswami (and 16.67 and Sienfeld &Pandis)
ustar[u > 0] = 0.4 * u[u > 0]/np.log(10/z0)
ustar[u <= 0] = 0.001
D = k*T*(Cc/(3*np.pi*mu*Dpm))
Sc = nu/D
# gamma=0.56 # for urban
# alpha=1.5 # for urban
EB = Sc**(-1 * gamma)
St = vs*(ustar**2)/(g*nu)
EIM = 10.0**(-3.0/St) # For smooth surfaces
# EIM =((St)./(0.82+St)).^2
R1 = np.exp(-St**(0.5)) # percentage of particles that stick
rb = 1/(3*(EB+EIM)*ustar*R1)
# Calculate ra
a = np.array([-0.096, -0.037, -0.002, 0, 0.004, 0.035])
b = np.array([0.029, 0.029, 0.018, 0, -0.018, -0.036])
# For wind speeds <= 3, use a = -0.037 and b = 0.029
# For wind speeds >3 and <=5, use a = -.002, b = 0.018
# For wind speeds > 5, use a = 0, b = 0
avals = a[1]*np.ones_like(u, dtype=float)
avals[u > 3] = a[2]
avals[u > 5] = a[3]
bvals = b[1]*np.ones_like(u, dtype=float)
bvals[u > 3] = b[2]
bvals[u > 5] = b[3]
L = 1/(avals + bvals*np.log(z0))
zeta0 = z0/L
zeta = 10.0/L
eta = ((1-15*zeta)**(0.25))
eta0 = ((1-15*zeta0)**(0.25))
ra = np.zeros_like(zeta, dtype=float) # Preallocate memory
ra[zeta == 0] = (1 / (0.4 * ustar[zeta == 0])) * np.log(10.0 / z0)
ra[zeta > 0] = (1 / (0.4 * ustar[zeta > 0])) * (np.log(10.0 / z0)
+ 4.7*(zeta[zeta > 0] - zeta0[zeta > 0]))
ra[zeta < 0] = (1 / (0.4 * ustar[zeta < 0])) * (np.log(10.0 / z0)
+ np.log((eta0[zeta < 0]**2 + 1) * (eta0[zeta < 0]+1)**2
/ ((eta[zeta < 0]**2 + 1) * (eta[zeta < 0]+1)**2))
+ 2*(np.arctan(eta[zeta < 0])-np.arctan(eta0[zeta < 0])))
# Calculate vd and mass flux
vd = 1/(ra+rb)+vs
return vd
|
58,817 |
def gels(a, b):
"""Solves over/well/under-determined linear systems.
Computes least-square solution to equation ``ax = b` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = numpy.promote_types(a.dtype.char, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
geqrf_helper = getattr(cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(cusolver, t + 'geqrf')
trsm = getattr(cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(cusolver, t + 'unmqr')
no_trans = cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = cublas.CUBLAS_OP_T
else:
trans = cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
nrhs = b.shape[1] if b.ndim == 2 else 1
dev_info = cupy.empty(1, dtype=numpy.int32)
tau = cupy.empty(mn_min, dtype=dtype)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
a_data_ptr = a.data.ptr
b_data_ptr = b.data.ptr
a = cupy.asfortranarray(a, dtype=dtype)
b = cupy.asfortranarray(b, dtype=dtype)
if a.data.ptr == a_data_ptr:
a = a.copy()
if b.data.ptr == b_data_ptr:
b = b.copy()
if m >= n: # over/well-determined systems
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, 1, a.data.ptr, m,
b.data.ptr, m)
if b.ndim == 1:
return b[:n]
else:
return b[:n, :]
else: # under-determined systems
a = cupy.asfortranarray(a.conj().T)
if b.ndim == 1:
bb = cupy.empty((n,), dtype=dtype, order='F')
bb[:m] = b
else:
bb = cupy.empty((n, nrhs), dtype=dtype, order='F')
bb[:m, :] = b
b = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, trans,
cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, 1, a.data.ptr, n,
b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
|
def gels(a, b):
"""Solves over/well/under-determined linear systems.
Computes least-square solution to equation ``ax = b` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim == 1:
nrhs = 1
elif b.ndim == 2:
nrhs = b.shape[1]
else:
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
dtype = numpy.promote_types(a.dtype.char, 'f')
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual:{})'.format(a.dtype))
geqrf_helper = getattr(cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(cusolver, t + 'geqrf')
trsm = getattr(cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(cusolver, t + 'unmqr')
no_trans = cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = cublas.CUBLAS_OP_T
else:
trans = cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
nrhs = b.shape[1] if b.ndim == 2 else 1
dev_info = cupy.empty(1, dtype=numpy.int32)
tau = cupy.empty(mn_min, dtype=dtype)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
a_data_ptr = a.data.ptr
b_data_ptr = b.data.ptr
a = cupy.asfortranarray(a, dtype=dtype)
b = cupy.asfortranarray(b, dtype=dtype)
if a.data.ptr == a_data_ptr:
a = a.copy()
if b.data.ptr == b_data_ptr:
b = b.copy()
if m >= n: # over/well-determined systems
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs, 1, a.data.ptr, m,
b.data.ptr, m)
if b.ndim == 1:
return b[:n]
else:
return b[:n, :]
else: # under-determined systems
a = cupy.asfortranarray(a.conj().T)
if b.ndim == 1:
bb = cupy.empty((n,), dtype=dtype, order='F')
bb[:m] = b
else:
bb = cupy.empty((n, nrhs), dtype=dtype, order='F')
bb[:m, :] = b
b = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, cublas.CUBLAS_SIDE_LEFT,
cublas.CUBLAS_FILL_MODE_UPPER, trans,
cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs, 1, a.data.ptr, n,
b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
cupy.linalg.util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
|
8,054 |
def fromavro(source, limit=None, skip=0, **avro_args):
"""Extract a table from the records of a avro file.
The `source` argument (string or file-like or fastavro.reader) can either
be the path of the file, a file-like input stream or a instance from
fastavro.reader.
The `limit` and `skip` arguments can be used to limit the range of rows
to extract.
The `sample` argument (int, optional) defines how many rows are inspected
for discovering the field types and building a schema for the avro file
when the `schema` argument is not passed.
The rows fields read from file can have scalar values like int, string,
float, datetime, date and decimal but can also have compound types like
enum, :ref:`array <array_schema>`, map, union and record.
The fields types can also have recursive structures defined
in :ref:`complex schemas <complex_schema>`.
Also types with :ref:`logical types <logical_schema>` types are read and
translated to coresponding python types: long timestamp-millis and
long timestamp-micros: datetime.datetime, int date: datetime.date,
bytes decimal and fixed decimal: Decimal, int time-millis and
long time-micros: datetime.time.
Example usage for reading files::
>>> # set up a Avro file to demonstrate with
...
>>> schema1 = {
... 'doc': 'Some people records.',
... 'name': 'People',
... 'namespace': 'test',
... 'type': 'record',
... 'fields': [
... {'name': 'name', 'type': 'string'},
... {'name': 'friends', 'type': 'int'},
... {'name': 'age', 'type': 'int'},
... ]
... }
...
>>> records1 = [
... {'name': 'Bob', 'friends': 42, 'age': 33},
... {'name': 'Jim', 'friends': 13, 'age': 69},
... {'name': 'Joe', 'friends': 86, 'age': 17},
... {'name': 'Ted', 'friends': 23, 'age': 51}
... ]
...
>>> import fastavro
>>> parsed_schema1 = fastavro.parse_schema(schema1)
>>> with open('example-file-to-read.avro', 'wb') as f1:
... fastavro.writer(f1, parsed_schema1, records1)
...
>>> # now demonstrate the use of fromavro()
>>> import petl as etl
>>> tbl1 = etl.fromavro('example-file-to-read.avro')
>>> tbl1
+-------+---------+-----+
| name | friends | age |
+=======+=========+=====+
| 'Bob' | 42 | 33 |
+-------+---------+-----+
| 'Jim' | 13 | 69 |
+-------+---------+-----+
| 'Joe' | 86 | 17 |
+-------+---------+-----+
| 'Ted' | 23 | 51 |
+-------+---------+-----+
.. versionadded:: 1.3.1
"""
source2 = read_source_from_arg(source)
return AvroView(source=source2,
limit=limit,
skip=skip,
**avro_args)
|
def fromavro(source, limit=None, skip=0, **avro_args):
"""Extract a table from the records of a avro file.
The `source` argument (string or file-like or fastavro.reader) can either
be the path of the file, a file-like input stream or a instance from
fastavro.reader.
The `limit` and `skip` arguments can be used to limit the range of rows
to extract.
The `sample` argument (int, optional) defines how many rows are inspected
for discovering the field types and building a schema for the avro file
when the `schema` argument is not passed.
The rows fields read from file can have scalar values like int, string,
float, datetime, date and decimal but can also have compound types like
enum, :ref:`array <array_schema>`, map, union and record.
The fields types can also have recursive structures defined
in :ref:`complex schemas <complex_schema>`.
Also types with :ref:`logical types <logical_schema>` types are read and
translated to coresponding python types: long timestamp-millis and
long timestamp-micros: datetime.datetime, int date: datetime.date,
bytes decimal and fixed decimal: Decimal, int time-millis and
long time-micros: datetime.time.
Example usage for reading files::
>>> # set up a Avro file to demonstrate with
...
>>> schema1 = {
... 'doc': 'Some people records.',
... 'name': 'People',
... 'namespace': 'test',
... 'type': 'record',
... 'fields': [
... {'name': 'name', 'type': 'string'},
... {'name': 'friends', 'type': 'int'},
... {'name': 'age', 'type': 'int'},
... ]
... }
...
>>> records1 = [
... {'name': 'Bob', 'friends': 42, 'age': 33},
... {'name': 'Jim', 'friends': 13, 'age': 69},
... {'name': 'Joe', 'friends': 86, 'age': 17},
... {'name': 'Ted', 'friends': 23, 'age': 51}
... ]
...
>>> import fastavro
>>> parsed_schema1 = fastavro.parse_schema(schema1)
>>> with open('example-file-to-read.avro', 'wb') as f1:
... fastavro.writer(f1, parsed_schema1, records1)
...
>>> # now demonstrate the use of fromavro()
>>> import petl as etl
>>> tbl1 = etl.fromavro('example-file-to-read.avro')
>>> tbl1
+-------+---------+-----+
| name | friends | age |
+=======+=========+=====+
| 'Bob' | 42 | 33 |
+-------+---------+-----+
| 'Jim' | 13 | 69 |
+-------+---------+-----+
| 'Joe' | 86 | 17 |
+-------+---------+-----+
| 'Ted' | 23 | 51 |
+-------+---------+-----+
.. versionadded:: 1.4.0
"""
source2 = read_source_from_arg(source)
return AvroView(source=source2,
limit=limit,
skip=skip,
**avro_args)
|
10,406 |
def main():
arg_spec = dict(
user=dict(required=True, aliases=['username', 'name']),
password=dict(default=None, no_log=True),
tags=dict(default=None),
permissions=dict(default=list(), type='list'),
vhost=dict(default='/'),
configure_priv=dict(default='^$'),
write_priv=dict(default='^$'),
read_priv=dict(default='^$'),
force=dict(default='no', type='bool'),
state=dict(default='present', choices=['present', 'absent']),
node=dict(default=None),
update_password=dict(default='on_create', choices=['on_create', 'always'])
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
username = module.params['user']
password = module.params['password']
tags = module.params['tags']
permissions = module.params['permissions']
vhost = module.params['vhost']
configure_priv = module.params['configure_priv']
write_priv = module.params['write_priv']
read_priv = module.params['read_priv']
force = module.params['force']
state = module.params['state']
node = module.params['node']
update_password = module.params['update_password']
if permissions:
vhosts = map(lambda permission: permission.get('vhost', '/'), permissions)
if any(map(lambda count: count > 1, count(vhosts).values())):
module.fail_json(msg="Error parsing permissions: You can't have two permission dicts for the same vhost")
bulk_permissions = True
else:
perm = {
'vhost': vhost,
'configure_priv': configure_priv,
'write_priv': write_priv,
'read_priv': read_priv
}
permissions.append(perm)
bulk_permissions = False
rabbitmq_user = RabbitMqUser(module, username, password, tags, permissions,
node, bulk_permissions=bulk_permissions)
result = dict(changed=False, user=username, state=state)
if rabbitmq_user.get():
if state == 'absent':
rabbitmq_user.delete()
result['changed'] = True
else:
if force:
rabbitmq_user.delete()
rabbitmq_user.add()
rabbitmq_user.get()
result['changed'] = True
elif update_password == 'always':
if not rabbitmq_user.check_password():
rabbitmq_user.change_password()
result['changed'] = True
if rabbitmq_user.has_tags_modifications():
rabbitmq_user.set_tags()
result['changed'] = True
if rabbitmq_user.has_permissions_modifications():
rabbitmq_user.set_permissions()
result['changed'] = True
elif state == 'present':
rabbitmq_user.add()
rabbitmq_user.set_tags()
rabbitmq_user.set_permissions()
result['changed'] = True
module.exit_json(**result)
|
def main():
arg_spec = dict(
user=dict(required=True, aliases=['username', 'name']),
password=dict(default=None, no_log=True),
tags=dict(default=None),
permissions=dict(default=list(), type='list'),
vhost=dict(default='/'),
configure_priv=dict(default='^$'),
write_priv=dict(default='^$'),
read_priv=dict(default='^$'),
force=dict(default='no', type='bool'),
state=dict(default='present', choices=['present', 'absent']),
node=dict(type='str'),
update_password=dict(default='on_create', choices=['on_create', 'always'])
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
username = module.params['user']
password = module.params['password']
tags = module.params['tags']
permissions = module.params['permissions']
vhost = module.params['vhost']
configure_priv = module.params['configure_priv']
write_priv = module.params['write_priv']
read_priv = module.params['read_priv']
force = module.params['force']
state = module.params['state']
node = module.params['node']
update_password = module.params['update_password']
if permissions:
vhosts = map(lambda permission: permission.get('vhost', '/'), permissions)
if any(map(lambda count: count > 1, count(vhosts).values())):
module.fail_json(msg="Error parsing permissions: You can't have two permission dicts for the same vhost")
bulk_permissions = True
else:
perm = {
'vhost': vhost,
'configure_priv': configure_priv,
'write_priv': write_priv,
'read_priv': read_priv
}
permissions.append(perm)
bulk_permissions = False
rabbitmq_user = RabbitMqUser(module, username, password, tags, permissions,
node, bulk_permissions=bulk_permissions)
result = dict(changed=False, user=username, state=state)
if rabbitmq_user.get():
if state == 'absent':
rabbitmq_user.delete()
result['changed'] = True
else:
if force:
rabbitmq_user.delete()
rabbitmq_user.add()
rabbitmq_user.get()
result['changed'] = True
elif update_password == 'always':
if not rabbitmq_user.check_password():
rabbitmq_user.change_password()
result['changed'] = True
if rabbitmq_user.has_tags_modifications():
rabbitmq_user.set_tags()
result['changed'] = True
if rabbitmq_user.has_permissions_modifications():
rabbitmq_user.set_permissions()
result['changed'] = True
elif state == 'present':
rabbitmq_user.add()
rabbitmq_user.set_tags()
rabbitmq_user.set_permissions()
result['changed'] = True
module.exit_json(**result)
|
42,338 |
def get_role_list(collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get list of installed collection roles.
Only roles that have an argument specification defined are returned.
.. note:: Version added: 2.2
:param str collection: A fully qualified collection name used to filter the results.
:param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_list_command(collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
def get_role_list(collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get list of installed collection roles.
Only roles that have an argument specification defined are returned.
.. note:: Version added: 2.2
:param str collection: A fully qualified collection name used to filter the results.
:param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution.
:param bool process_isolation: Enable process isolation using a container engine such as podman.
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_list_command(collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
27,261 |
def schema(
pairs: Iterable[tuple[str, dt.DataType]]
| dict[str, dt.DataType]
| None = None,
names: Iterable[str] | None = None,
types: Iterable[str | dt.DataType] | None = None,
) -> sch.Schema:
"""Validate and return an :class:`~ibis.expr.schema.Schema` object.
Parameters
----------
pairs
List or dictionary of name, type pairs. Mutually exclusive with `names`
and `types`.
names
Field names. Mutually exclusive with `pairs`.
types
Field types. Mutually exclusive with `pairs`.
Examples
--------
>>> from ibis import schema
>>> sc = schema([('foo', 'string'),
... ('bar', 'int64'),
... ('baz', 'boolean')])
>>> sc2 = schema(names=['foo', 'bar', 'baz'],
... types=['string', 'int64', 'boolean'])
"""
if pairs is not None:
if isinstance(pairs, dict):
return Schema.from_dict(pairs)
return Schema.from_tuples(pairs)
else:
return Schema(names, types)
|
def schema(
pairs: Iterable[tuple[str, dt.DataType]]
| Mapping[str, dt.DataType]
| None = None,
names: Iterable[str] | None = None,
types: Iterable[str | dt.DataType] | None = None,
) -> sch.Schema:
"""Validate and return an :class:`~ibis.expr.schema.Schema` object.
Parameters
----------
pairs
List or dictionary of name, type pairs. Mutually exclusive with `names`
and `types`.
names
Field names. Mutually exclusive with `pairs`.
types
Field types. Mutually exclusive with `pairs`.
Examples
--------
>>> from ibis import schema
>>> sc = schema([('foo', 'string'),
... ('bar', 'int64'),
... ('baz', 'boolean')])
>>> sc2 = schema(names=['foo', 'bar', 'baz'],
... types=['string', 'int64', 'boolean'])
"""
if pairs is not None:
if isinstance(pairs, dict):
return Schema.from_dict(pairs)
return Schema.from_tuples(pairs)
else:
return Schema(names, types)
|
23,642 |
def cloud_opacity_factor(irr_dif_clouds: np.ndarray,
irr_dir_clouds: np.ndarray,
irr_ghi_clouds: np.ndarray,
spectra: dict) -> (np.ndarray, np.ndarray):
"""
Calculate the effect of "cloud opacity factor" on spectral
irradiance under clear sky.
First we calculate the rho fraction based on campbell_norman
irradiance with clouds converted to POA irradiance. In the
paper [1] these values are obtained from observations. The equations
used for calculating cloud opacity factor to scale the clear sky
spectral estimates using spectrl2. Results can be compared
with sun calculator:
https://www2.pvlighthouse.com.au/calculators/solar%20
spectrum%20calculator/solar%20spectrum%20calculator.aspx
Parameters
----------
irr_dif_clouds:np.ndarray
Total diffuse irradiance (poa_diffuse) estimated using
`pvlib.irradiance.get_total_irradiance` and
`pvlib.irradiance.campbell_norman` irradiance with clouds
(transmittance)
irr_dir_clouds:np.ndarray
Total direct irradiance (poa_direct) estimated using
`pvlib.irradiance.get_total_irradiance` and
`pvlib.irradiance.campbell_norman` irradiance with
clouds (transmittance)
irr_ghi_clouds:np.ndarray
Total direct irradiance (poa_global) estimated
using `pvlib.irradiance.get_total_irradiance` and
`pvlib.irradiance.campbell_norman irradiance
with clouds (transmittance)
spectra:np.ndarray
Spectral irradiance output from `pvlib.spectrum.spectrl2`
under clear-sky conditions
Returns
-------
f_dir, f_diff spectral direct and diffuse irradiance
scaled for cloudiness
References
----------
.. [1] Ref: Marco Ernst, Hendrik Holst, Matthias Winter,
Pietro P. Altermatt,
SunCalculator: A program to calculate the angular and spectral
distribution of direct and diffuse solar radiation, Solar Energy
Materials and Solar Cells, Volume 157, 2016,
Pages 913-922,
"""
rho = irr_dif_clouds / irr_ghi_clouds
wl = spectra['wavelength']
irr_diff_s = np.trapz(y=spectra['poa_sky_diffuse'][:, 0], x=wl)
irr_dir_s = np.trapz(y=spectra['poa_direct'][:, 0], x=wl)
irr_glob_s = np.trapz(y=spectra['poa_global'][:, 0], x=wl)
rho_spectra = irr_diff_s / irr_glob_s
n_rho = (rho - rho_spectra) / (1 - rho_spectra)
# Direct light. Equation 6 Ernst et al. 2016
f_diff_s = spectra['poa_sky_diffuse'][:, :]
f_dir_s = spectra['poa_direct'][:, :]
f_dir = (f_dir_s / irr_dir_s) * irr_dir_clouds
# Diffuse light scaling factor. Equation 7 Ernst et al. 2016
s_diff = ((1 - n_rho) * (f_diff_s / irr_diff_s) + n_rho
* ((f_dir_s + f_diff_s) / irr_glob_s))
# Equation 8 Ernst et al. 2016
f_diff = s_diff * irr_dif_clouds
return f_dir, f_diff
|
def cloud_opacity_factor(irr_dif_clouds: np.ndarray,
irr_dir_clouds: np.ndarray,
irr_ghi_clouds: np.ndarray,
spectra: dict) -> (np.ndarray, np.ndarray):
"""
Calculate the effect of "cloud opacity factor" on spectral
irradiance under clear sky.
First we calculate the rho fraction based on campbell_norman
irradiance with clouds converted to POA irradiance. In the
paper [1]_ these values are obtained from observations. The equations
used for calculating cloud opacity factor to scale the clear sky
spectral estimates using spectrl2. Results can be compared
with sun calculator:
https://www2.pvlighthouse.com.au/calculators/solar%20
spectrum%20calculator/solar%20spectrum%20calculator.aspx
Parameters
----------
irr_dif_clouds:np.ndarray
Total diffuse irradiance (poa_diffuse) estimated using
`pvlib.irradiance.get_total_irradiance` and
`pvlib.irradiance.campbell_norman` irradiance with clouds
(transmittance)
irr_dir_clouds:np.ndarray
Total direct irradiance (poa_direct) estimated using
`pvlib.irradiance.get_total_irradiance` and
`pvlib.irradiance.campbell_norman` irradiance with
clouds (transmittance)
irr_ghi_clouds:np.ndarray
Total direct irradiance (poa_global) estimated
using `pvlib.irradiance.get_total_irradiance` and
`pvlib.irradiance.campbell_norman irradiance
with clouds (transmittance)
spectra:np.ndarray
Spectral irradiance output from `pvlib.spectrum.spectrl2`
under clear-sky conditions
Returns
-------
f_dir, f_diff spectral direct and diffuse irradiance
scaled for cloudiness
References
----------
.. [1] Ref: Marco Ernst, Hendrik Holst, Matthias Winter,
Pietro P. Altermatt,
SunCalculator: A program to calculate the angular and spectral
distribution of direct and diffuse solar radiation, Solar Energy
Materials and Solar Cells, Volume 157, 2016,
Pages 913-922,
"""
rho = irr_dif_clouds / irr_ghi_clouds
wl = spectra['wavelength']
irr_diff_s = np.trapz(y=spectra['poa_sky_diffuse'][:, 0], x=wl)
irr_dir_s = np.trapz(y=spectra['poa_direct'][:, 0], x=wl)
irr_glob_s = np.trapz(y=spectra['poa_global'][:, 0], x=wl)
rho_spectra = irr_diff_s / irr_glob_s
n_rho = (rho - rho_spectra) / (1 - rho_spectra)
# Direct light. Equation 6 Ernst et al. 2016
f_diff_s = spectra['poa_sky_diffuse'][:, :]
f_dir_s = spectra['poa_direct'][:, :]
f_dir = (f_dir_s / irr_dir_s) * irr_dir_clouds
# Diffuse light scaling factor. Equation 7 Ernst et al. 2016
s_diff = ((1 - n_rho) * (f_diff_s / irr_diff_s) + n_rho
* ((f_dir_s + f_diff_s) / irr_glob_s))
# Equation 8 Ernst et al. 2016
f_diff = s_diff * irr_dif_clouds
return f_dir, f_diff
|
23,679 |
def calc_surface_orientation(tracker_theta, axis_tilt=0, axis_azimuth=0):
"""
Calculate the surface tilt and azimuth angles for a given tracker rotation.
Parameters
----------
tracker_theta : numeric
Tracker rotation angle [degrees]
axis_tilt : float, default 0
The tilt of the axis of rotation with respect to horizontal [degrees]
axis_azimuth : float, default 0
A value denoting the compass direction along which the axis of
rotation lies. Measured east of north. [degrees]
Returns
-------
dict or DataFrame
Contains keys ``'surface_tilt'`` and ``'surface_azimuth'`` representing
the module orientation accounting for tracker rotation and axis
orientation. [degrees]
References
----------
.. [1] William F Marion and Aron P Dobos, "Rotation Angle for the Optimum
Tracking of One-Axis Trackers", Technical Report NREL/TP-6A20-58891,
July 2013. :doi:`10.2172/1089596`
"""
with np.errstate(invalid='ignore', divide='ignore'):
surface_tilt = acosd(cosd(tracker_theta) * cosd(axis_tilt))
# clip(..., -1, +1) to prevent arcsin(1 + epsilon) issues:
azimuth_delta = asind(np.clip(sind(tracker_theta) / sind(surface_tilt),
a_min=-1, a_max=1))
# Combine Eqs 2, 3, and 4:
azimuth_delta = np.where(abs(tracker_theta) < 90,
azimuth_delta,
-azimuth_delta + np.sign(tracker_theta) * 180)
# handle surface_tilt=0 case:
azimuth_delta = np.where(sind(surface_tilt) != 0, azimuth_delta, 90)
surface_azimuth = (axis_azimuth + azimuth_delta) % 360
out = {
'surface_tilt': surface_tilt,
'surface_azimuth': surface_azimuth,
}
if hasattr(tracker_theta, 'index'):
out = pd.DataFrame(out)
return out
|
def calc_surface_orientation(tracker_theta, axis_tilt=0, axis_azimuth=0):
"""
Calculate the surface tilt and azimuth angles for a given tracker rotation.
Parameters
----------
tracker_theta : numeric
Tracker rotation angle [degrees]
axis_tilt : float, default 0
The tilt of the axis of rotation with respect to horizontal [degrees]
axis_azimuth : float, default 0
A value denoting the compass direction along which the axis of
rotation lies. Measured east of north. [degree]
Returns
-------
dict or DataFrame
Contains keys ``'surface_tilt'`` and ``'surface_azimuth'`` representing
the module orientation accounting for tracker rotation and axis
orientation. [degrees]
References
----------
.. [1] William F Marion and Aron P Dobos, "Rotation Angle for the Optimum
Tracking of One-Axis Trackers", Technical Report NREL/TP-6A20-58891,
July 2013. :doi:`10.2172/1089596`
"""
with np.errstate(invalid='ignore', divide='ignore'):
surface_tilt = acosd(cosd(tracker_theta) * cosd(axis_tilt))
# clip(..., -1, +1) to prevent arcsin(1 + epsilon) issues:
azimuth_delta = asind(np.clip(sind(tracker_theta) / sind(surface_tilt),
a_min=-1, a_max=1))
# Combine Eqs 2, 3, and 4:
azimuth_delta = np.where(abs(tracker_theta) < 90,
azimuth_delta,
-azimuth_delta + np.sign(tracker_theta) * 180)
# handle surface_tilt=0 case:
azimuth_delta = np.where(sind(surface_tilt) != 0, azimuth_delta, 90)
surface_azimuth = (axis_azimuth + azimuth_delta) % 360
out = {
'surface_tilt': surface_tilt,
'surface_azimuth': surface_azimuth,
}
if hasattr(tracker_theta, 'index'):
out = pd.DataFrame(out)
return out
|
42,681 |
def _validate_blockchain_account_schemas(
data: Dict[str, Any],
address_getter: Callable,
) -> None:
"""Validates schema input for the PUT/PATCH/DELETE on blockchain account data"""
# Make sure no duplicates addresses are given
given_addresses = set()
# Make sure EVM based addresses are checksummed
if data['blockchain'] in (SupportedBlockchain.ETHEREUM, SupportedBlockchain.AVALANCHE):
for account_data in data['accounts']:
address_string = address_getter(account_data)
if not address_string.endswith('.eth'):
# Make sure that given value is an ethereum address
try:
address = to_checksum_address(address_string)
except (ValueError, TypeError) as e:
raise ValidationError(
f'Given value {address_string} is not an ethereum address',
field_name='address',
) from e
else:
# else it's ENS name and will be checked in the transformation step and not here
address = address_string
if address in given_addresses:
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
# Make sure bitcoin addresses are valid
elif data['blockchain'] == SupportedBlockchain.BITCOIN:
for account_data in data['accounts']:
address = address_getter(account_data)
# ENS domain will be checked in the transformation step
if not address.endswith('.eth') and not is_valid_btc_address(address):
raise ValidationError(
f'Given value {address} is not a valid bitcoin address',
field_name='address',
)
if address in given_addresses:
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
# Make sure bitcoin cash addresses are valid
elif data['blockchain'] == SupportedBlockchain.BITCOIN_CASH:
for account_data in data['accounts']:
address = address_getter(account_data)
# Check whether its a valid BCH CashAddr
if address.startswith('bitcoincash:') and not is_valid_bitcoin_cash_address(address):
raise ValidationError(
f'Given value {address} is not a valid bitcoin cash address',
field_name='address',
)
# Check to see whether its a valid BCH legacy address
if (not address.startswith('bitcoincash:') and address.endswith('.eth')) and not is_valid_btc_address(address): # noqa: 501
raise ValidationError(
f'Given value {address} is not a valid bitcoin cash address',
field_name='address',
)
# Check if they're no duplicates of same address but in different formats
if force_address_to_legacy_address(address) in force_addresses_to_legacy_addresses(given_addresses): # noqa: 501
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
# Make sure kusama addresses are valid (either ss58 format or ENS domain)
elif data['blockchain'] == SupportedBlockchain.KUSAMA:
for account_data in data['accounts']:
address = address_getter(account_data)
# ENS domain will be checked in the transformation step
if not address.endswith('.eth') and not is_valid_kusama_address(address):
raise ValidationError(
f'Given value {address} is not a valid kusama address',
field_name='address',
)
if address in given_addresses:
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
# Make sure polkadot addresses are valid (either ss58 format or ENS domain)
elif data['blockchain'] == SupportedBlockchain.POLKADOT:
for account_data in data['accounts']:
address = address_getter(account_data)
# ENS domain will be checked in the transformation step
if not address.endswith('.eth') and not is_valid_polkadot_address(address):
raise ValidationError(
f'Given value {address} is not a valid polkadot address',
field_name='address',
)
if address in given_addresses:
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
|
def _validate_blockchain_account_schemas(
data: Dict[str, Any],
address_getter: Callable,
) -> None:
"""Validates schema input for the PUT/PATCH/DELETE on blockchain account data"""
# Make sure no duplicates addresses are given
given_addresses = set()
# Make sure EVM based addresses are checksummed
if data['blockchain'] in (SupportedBlockchain.ETHEREUM, SupportedBlockchain.AVALANCHE):
for account_data in data['accounts']:
address_string = address_getter(account_data)
if not address_string.endswith('.eth'):
# Make sure that given value is an ethereum address
try:
address = to_checksum_address(address_string)
except (ValueError, TypeError) as e:
raise ValidationError(
f'Given value {address_string} is not an ethereum address',
field_name='address',
) from e
else:
# else it's ENS name and will be checked in the transformation step and not here
address = address_string
if address in given_addresses:
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
# Make sure bitcoin addresses are valid
elif data['blockchain'] == SupportedBlockchain.BITCOIN:
for account_data in data['accounts']:
address = address_getter(account_data)
# ENS domain will be checked in the transformation step
if not address.endswith('.eth') and not is_valid_btc_address(address):
raise ValidationError(
f'Given value {address} is not a valid bitcoin address',
field_name='address',
)
if address in given_addresses:
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
# Make sure bitcoin cash addresses are valid
elif data['blockchain'] == SupportedBlockchain.BITCOIN_CASH:
for account_data in data['accounts']:
address = address_getter(account_data)
# Check whether its a valid BCH CashAddr
if address.startswith('bitcoincash:') and not is_valid_bitcoin_cash_address(address):
raise ValidationError(
f'Given value {address} is not a valid bitcoin cash address',
field_name='address',
)
# Check to see whether its a valid BCH legacy address
if (not address.startswith('bitcoincash:') and address.endswith('.eth')) and not is_valid_btc_address(address): # noqa: 501
raise ValidationError(
f'Given value {address} is not a valid bitcoin cash address',
field_name='address',
)
# Check if they're not duplicates of same address but in different formats
if force_address_to_legacy_address(address) in force_addresses_to_legacy_addresses(given_addresses): # noqa: 501
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
# Make sure kusama addresses are valid (either ss58 format or ENS domain)
elif data['blockchain'] == SupportedBlockchain.KUSAMA:
for account_data in data['accounts']:
address = address_getter(account_data)
# ENS domain will be checked in the transformation step
if not address.endswith('.eth') and not is_valid_kusama_address(address):
raise ValidationError(
f'Given value {address} is not a valid kusama address',
field_name='address',
)
if address in given_addresses:
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
# Make sure polkadot addresses are valid (either ss58 format or ENS domain)
elif data['blockchain'] == SupportedBlockchain.POLKADOT:
for account_data in data['accounts']:
address = address_getter(account_data)
# ENS domain will be checked in the transformation step
if not address.endswith('.eth') and not is_valid_polkadot_address(address):
raise ValidationError(
f'Given value {address} is not a valid polkadot address',
field_name='address',
)
if address in given_addresses:
raise ValidationError(
f'Address {address} appears multiple times in the request data',
field_name='address',
)
given_addresses.add(address)
|
29,779 |
def populate_intended_for(path_to_bids_session):
"""
Adds the 'IntendedFor' field to the fmap .json files in a session folder.
It goes through the session folders and checks what runs have the same
'ShimSetting' as the fmaps. If there is no 'ShimSetting' field in the json
file, we'll use the folder name ('func', 'dwi', 'anat') and see which fmap
with a matching '_acq' entity.
If several fmap runs have the same 'ShimSetting' (or '_acq'), it will use
the first one. Because fmaps come in groups (with reversed PE polarity,
or magnitude/phase), it adds the same runs to the 'IntendedFor' of the
corresponding fmaps by checking the '_acq' and '_run' entities.
Note: the logic behind the way we decide how to populate the "IntendedFor"
is: we want all images in the session (except for the fmap images
themselves) to have AT MOST one fmap. (That is, a pair of SE EPI with
reversed polarities, or a magnitude a phase field map). If there are more
than one fmap (more than a fmap pair) with the same acquisition parameters
as, say, a functional run, we will just assign that run to the FIRST pair,
while leaving the other fmap pairs without any assigned images. If the
user's intentions were different, he/she will have to manually edit the
fmap json files.
Parameters:
----------
path_to_bids_session : str or os.path
path to the session folder (or to the subject folder, if there are no
sessions).
"""
lgr.info('')
lgr.info('Adding "IntendedFor" to the fieldmaps in {}.'.format(path_to_bids_session))
# Resolve path (eliminate '..')
path_to_bids_session = op.abspath(path_to_bids_session)
# get the BIDS folder (if "data_folder" includes the session, remove it):
if op.basename(path_to_bids_session).startswith('ses-'):
bids_folder = op.dirname(path_to_bids_session)
else:
bids_folder = path_to_bids_session
fmap_dir = op.join(path_to_bids_session, 'fmap')
if not op.exists(fmap_dir):
lgr.warning('Fmap folder not found in {}.'.format(path_to_bids_session))
lgr.warning('We cannot add the IntendedFor field')
return
# Get a list of all fmap json files in the session:
# (we will remove elements later on, so don't just iterate)
fmap_jsons = sorted([j for j in glob(op.join(path_to_bids_session, 'fmap/*.json'))])
# Get a set with all non-fmap json files in the session (set is easier):
# We also exclude the SBRef files.
session_jsons = set(
j for j in glob(op.join(path_to_bids_session, '*/*.json')) if not (
j in fmap_jsons
# j[:-5] removes the '.json' from the end
or j[:-5].endswith('_sbref')
)
)
# Loop through all the fmap json files and, for each one, find which other
# non-fmap images in the session have the same shim settings. Those that
# match are added to the intended_for list and removed from the list of
# non-fmap json files in the session (since they have already assigned to
# a fmap).
# After finishing with all the non-fmap images in the session, we go back
# to the fmap json file list, and find any other fmap json files of the
# same acquisition type and run number (because fmaps have several files:
# normal- and reversed-polarity, or magnitude and phase, etc.) We add the
# same IntendedFor list to those other corresponding fmap json files, and
# remove them from the list of available fmap json files.
# Once we have gone through all the fmap json files, we are done.
runs_accounted_for = set()
fmaps_accounted_for = set()
for fm_json in fmap_jsons:
if fm_json not in fmaps_accounted_for:
lgr.debug('Looking for runs for {}'.format(fm_json))
fm_shims = get_shim_setting(fm_json)
intended_for = []
for image_json in session_jsons:
image_shims = get_shim_setting(image_json)
if image_shims == fm_shims:
# BIDS specifies that the intended for are:
# - **image** files
# - path relative to the **subject level**
image_json_relative_path = op.relpath(image_json, start=bids_folder)
# image_json_relative_path[:-5] removes the '.json' extension:
intended_for.append(
image_json_relative_path[:-5] + '.nii.gz'
)
runs_accounted_for.add(image_json)
if len(intended_for) > 0:
intended_for = sorted([str(f) for f in intended_for])
# find all fmap json files with the same <acq> and <run> entities:
fm_json_name = op.basename(fm_json)
acq_match = re.findall('([/_]acq-([a-zA-Z0-9]*))', fm_json_name)
acq_str = acq_match[0][0] if acq_match else ''
run_match = re.findall('([/_]run-([a-zA-Z0-9]*))', fm_json_name)
run_str = run_match[0][0] if run_match else ''
# Loop through all the files that have the same "acq-" and "run-"
# Note: the following loop will also include 'fm_json'
for linked_fm_json in glob(op.join(path_to_bids_session, 'fmap/*' + acq_str + '*' + run_str + '*.json')):
# add the IntendedFor field to the json file:
add_field_to_json(linked_fm_json, {"IntendedFor": intended_for})
fmaps_accounted_for.update({linked_fm_json})
# Remove the runs accounted for from the session_jsons list, so that
# we don't assign another fmap to this image:
session_jsons -= runs_accounted_for
|
def populate_intended_for(path_to_bids_session):
"""
Adds the 'IntendedFor' field to the fmap .json files in a session folder.
It goes through the session folders and checks what runs have the same
'ShimSetting' as the fmaps. If there is no 'ShimSetting' field in the json
file, we'll use the folder name ('func', 'dwi', 'anat') and see which fmap
with a matching '_acq' entity.
If several fmap runs have the same 'ShimSetting' (or '_acq'), it will use
the first one. Because fmaps come in groups (with reversed PE polarity,
or magnitude/phase), it adds the same runs to the 'IntendedFor' of the
corresponding fmaps by checking the '_acq' and '_run' entities.
Note: the logic behind the way we decide how to populate the "IntendedFor"
is: we want all images in the session (except for the fmap images
themselves) to have AT MOST one fmap. (That is, a pair of SE EPI with
reversed polarities, or a magnitude a phase field map). If there are more
than one fmap (more than a fmap pair) with the same acquisition parameters
as, say, a functional run, we will just assign that run to the FIRST pair,
while leaving the other fmap pairs without any assigned images. If the
user's intentions were different, he/she will have to manually edit the
fmap json files.
Parameters:
----------
path_to_bids_session : str or os.path
path to the session folder (or to the subject folder, if there are no
sessions).
"""
lgr.info('')
lgr.info('Adding "IntendedFor" to the fieldmaps in {}.'.format(path_to_bids_session))
# Resolve path (eliminate '..')
path_to_bids_session = op.abspath(path_to_bids_session)
# get the BIDS folder (if "data_folder" includes the session, remove it):
if op.basename(path_to_bids_session).startswith('ses-'):
bids_folder = op.dirname(path_to_bids_session)
else:
bids_folder = path_to_bids_session
fmap_dir = op.join(path_to_bids_session, 'fmap')
if not op.exists(fmap_dir):
lgr.warning('Fmap folder not found in {}.'.format(path_to_bids_session))
lgr.warning('We cannot add the IntendedFor field')
return
# Get a list of all fmap json files in the session:
# (we will remove elements later on, so don't just iterate)
fmap_jsons = sorted(glob(op.join(path_to_bids_session, 'fmap/*.json')))
# Get a set with all non-fmap json files in the session (set is easier):
# We also exclude the SBRef files.
session_jsons = set(
j for j in glob(op.join(path_to_bids_session, '*/*.json')) if not (
j in fmap_jsons
# j[:-5] removes the '.json' from the end
or j[:-5].endswith('_sbref')
)
)
# Loop through all the fmap json files and, for each one, find which other
# non-fmap images in the session have the same shim settings. Those that
# match are added to the intended_for list and removed from the list of
# non-fmap json files in the session (since they have already assigned to
# a fmap).
# After finishing with all the non-fmap images in the session, we go back
# to the fmap json file list, and find any other fmap json files of the
# same acquisition type and run number (because fmaps have several files:
# normal- and reversed-polarity, or magnitude and phase, etc.) We add the
# same IntendedFor list to those other corresponding fmap json files, and
# remove them from the list of available fmap json files.
# Once we have gone through all the fmap json files, we are done.
runs_accounted_for = set()
fmaps_accounted_for = set()
for fm_json in fmap_jsons:
if fm_json not in fmaps_accounted_for:
lgr.debug('Looking for runs for {}'.format(fm_json))
fm_shims = get_shim_setting(fm_json)
intended_for = []
for image_json in session_jsons:
image_shims = get_shim_setting(image_json)
if image_shims == fm_shims:
# BIDS specifies that the intended for are:
# - **image** files
# - path relative to the **subject level**
image_json_relative_path = op.relpath(image_json, start=bids_folder)
# image_json_relative_path[:-5] removes the '.json' extension:
intended_for.append(
image_json_relative_path[:-5] + '.nii.gz'
)
runs_accounted_for.add(image_json)
if len(intended_for) > 0:
intended_for = sorted([str(f) for f in intended_for])
# find all fmap json files with the same <acq> and <run> entities:
fm_json_name = op.basename(fm_json)
acq_match = re.findall('([/_]acq-([a-zA-Z0-9]*))', fm_json_name)
acq_str = acq_match[0][0] if acq_match else ''
run_match = re.findall('([/_]run-([a-zA-Z0-9]*))', fm_json_name)
run_str = run_match[0][0] if run_match else ''
# Loop through all the files that have the same "acq-" and "run-"
# Note: the following loop will also include 'fm_json'
for linked_fm_json in glob(op.join(path_to_bids_session, 'fmap/*' + acq_str + '*' + run_str + '*.json')):
# add the IntendedFor field to the json file:
add_field_to_json(linked_fm_json, {"IntendedFor": intended_for})
fmaps_accounted_for.update({linked_fm_json})
# Remove the runs accounted for from the session_jsons list, so that
# we don't assign another fmap to this image:
session_jsons -= runs_accounted_for
|
38,384 |
def load_archive(
fn: str, path: str, ratarmount_kwa: dict = None, *args, **kwargs
) -> Dataset:
if ratarmount_kwa is None:
ratarmount_kwa = {}
from yt.utilities.on_demand_imports import _ratarmount
with open(fn, mode="br") as fd:
compression = _ratarmount.SQLiteIndexedTar._detectCompression(fd)
if compression is None:
YTUnidentifiedDataType(fn, *args, **kwargs)
# Note: the temporary directory will be created
# by ratarmount
tempdir = fn + ".mount"
tempdir_base = tempdir
i = 0
while os.path.exists(tempdir):
i += 1
tempdir = f"{tempdir_base}.{i}"
def mount(filename, mnt_dir):
mylog.info("Mounting archive into %s", mnt_dir)
_ratarmount.cli([filename, mnt_dir])
def umount(mnt_dir):
mylog.info("Unmounting archive %s", mnt_dir)
call(["umount", mnt_dir])
# Mount the archive
proc = Process(target=mount, args=(fn, tempdir))
proc.start()
proc.join()
# At exit of the interpreter, unmount
atexit.register(umount, mnt_dir=tempdir)
return load(os.path.join(tempdir, path), *args, **kwargs)
|
def load_archive(
fn: str, path: str, ratarmount_kwa: Optional[Dict] = None, *args, **kwargs
) -> Dataset:
if ratarmount_kwa is None:
ratarmount_kwa = {}
from yt.utilities.on_demand_imports import _ratarmount
with open(fn, mode="br") as fd:
compression = _ratarmount.SQLiteIndexedTar._detectCompression(fd)
if compression is None:
YTUnidentifiedDataType(fn, *args, **kwargs)
# Note: the temporary directory will be created
# by ratarmount
tempdir = fn + ".mount"
tempdir_base = tempdir
i = 0
while os.path.exists(tempdir):
i += 1
tempdir = f"{tempdir_base}.{i}"
def mount(filename, mnt_dir):
mylog.info("Mounting archive into %s", mnt_dir)
_ratarmount.cli([filename, mnt_dir])
def umount(mnt_dir):
mylog.info("Unmounting archive %s", mnt_dir)
call(["umount", mnt_dir])
# Mount the archive
proc = Process(target=mount, args=(fn, tempdir))
proc.start()
proc.join()
# At exit of the interpreter, unmount
atexit.register(umount, mnt_dir=tempdir)
return load(os.path.join(tempdir, path), *args, **kwargs)
|
26,781 |
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = settings.engine
with connectable.connect() as connection:
context.configure(
connection=connection,
transaction_per_migration=True,
target_metadata=target_metadata,
compare_type=COMPARE_TYPE,
include_object=include_object,
render_as_batch=True
)
with context.begin_transaction():
if connection.dialect.name == 'mysql' and \
connection.dialect.server_version_info[0] == 5 and \
connection.dialect.server_version_info[1] == 7:
connection.execute(
"select GET_LOCK('alembic',1800);"
)
context.run_migrations()
if connection.dialect.name == 'mysql' and \
connection.dialect.server_version_info[0] == 5 and \
connection.dialect.server_version_info[1] == 7:
connection.execute(
"select RELEASE_LOCK('alembic');"
)
|
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = settings.engine
with connectable.connect() as connection:
context.configure(
connection=connection,
transaction_per_migration=True,
target_metadata=target_metadata,
compare_type=COMPARE_TYPE,
include_object=include_object,
render_as_batch=True
)
with context.begin_transaction():
if connection.dialect.name == 'mysql' and \
connection.dialect.server_version_info >= (5, 6):
connection.execute(
"select GET_LOCK('alembic',1800);"
)
context.run_migrations()
if connection.dialect.name == 'mysql' and \
connection.dialect.server_version_info[0] == 5 and \
connection.dialect.server_version_info[1] == 7:
connection.execute(
"select RELEASE_LOCK('alembic');"
)
|
22,697 |
def open(file_path, flags, mode=0o777): # pylint: disable=redefined-builtin
# type: (str, int, int) -> int
"""
Wrapper of original os.open function, that will ensure on Windows that given mode
is correctly applied.
:param str file_path: The file path to open
:param int flags: Flags to apply on file while opened
:param int mode: POSIX mode to apply on file when opened,
Python defaults will be applied if ``None``
:returns: the file descriptor to the opened file
:rtype: int
"""
file_descriptor = os.open(file_path, flags, mode)
# TODO: Change to security.chmod once all logic of windows files permissions has been merged
os.chmod(file_path, mode)
return file_descriptor
|
def open(file_path, flags, mode=0o777): # pylint: disable=redefined-builtin
# type: (str, int, int) -> int
"""
Wrapper of original os.open function, that will ensure on Windows that given mode
is correctly applied.
:param str file_path: The file path to open
:param int flags: Flags to apply on file while opened
:param int mode: POSIX mode to apply on file when opened,
Python defaults will be applied if ``None``
:returns: the file descriptor to the opened file
:rtype: int
"""
file_descriptor = os.open(file_path, flags, mode)
# TODO: Change to filesystem.chmod once all logic of windows files permissions has been merged
os.chmod(file_path, mode)
return file_descriptor
|
52,933 |
def mock_env(tmpdir, docker_tasker, platform='x86_64', base_layers=0,
remote_sources=REMOTE_SOURCES, r_c_m_override=None, pnc_artifacts=True,
): # pylint: disable=W0102
inspection_data = {
INSPECT_ROOTFS: {
INSPECT_ROOTFS_LAYERS: list(range(base_layers))
}
}
if r_c_m_override is None:
r_c_m = {
'version': 1,
'cachito': {
'api_url': CACHITO_URL,
'auth': {
'ssl_certs_dir': str(tmpdir),
},
},
'pnc': {
'base_api_url': PNC_ROOT,
'get_artifact_path': 'artifacts/{}',
},
}
else:
r_c_m = r_c_m_override
env = (MockEnv()
.for_plugin('prebuild', AddImageContentManifestPlugin.key,
{'remote_sources': remote_sources})
.set_reactor_config(r_c_m)
.make_orchestrator()
)
if pnc_artifacts:
env.workflow.prebuild_results[PLUGIN_FETCH_MAVEN_KEY] = {'pnc_artifact_ids':
[PNC_ARTIFACT['id']]}
tmpdir.join('cert').write('')
env.workflow.builder.set_inspection_data(inspection_data)
env.workflow.user_params['platform'] = platform
# Ensure to succeed in reading the content_sets.yml
env.workflow.source.get_build_file_path = lambda: (str(tmpdir), str(tmpdir))
return env.create_runner(docker_tasker)
|
def mock_env(tmpdir, docker_tasker, platform='x86_64', base_layers=0,
remote_sources=REMOTE_SOURCES, r_c_m_override=None, pnc_artifacts=True,
): # pylint: disable=W0102
inspection_data = {
INSPECT_ROOTFS: {
INSPECT_ROOTFS_LAYERS: list(range(base_layers))
}
}
if r_c_m_override is None:
r_c_m = {
'version': 1,
'cachito': {
'api_url': CACHITO_URL,
'auth': {
'ssl_certs_dir': str(tmpdir),
},
},
'pnc': {
'base_api_url': PNC_ROOT,
'get_artifact_path': 'artifacts/{}',
},
}
else:
r_c_m = r_c_m_override
env = (MockEnv()
.for_plugin('prebuild', AddImageContentManifestPlugin.key,
{'remote_sources': remote_sources})
.set_reactor_config(r_c_m)
.make_orchestrator()
)
if pnc_artifacts:
env.set_plugin_result(
'prebuild', PLUGIN_FETCH_MAVEN_KEY, {'pnc_artifact_ids': [PNC_ARTIFACT['id']]}
)
tmpdir.join('cert').write('')
env.workflow.builder.set_inspection_data(inspection_data)
env.workflow.user_params['platform'] = platform
# Ensure to succeed in reading the content_sets.yml
env.workflow.source.get_build_file_path = lambda: (str(tmpdir), str(tmpdir))
return env.create_runner(docker_tasker)
|
8,401 |
def spectrum_from_column_mapping(table, column_mapping, wcs=None):
"""
Given a table and a mapping of the table column names to attributes
on the Spectrum1D object, parse the information into a Spectrum1D.
Parameters
----------
table : :class:`~astropy.table.Table`
The table object (e.g. returned from `Table.read('data_file')`).
column_mapping : dict
A dictionary describing the relation between the table columns
and the arguments of the `Spectrum1D` class, along with unit
information. The dictionary keys should be the table column names
while the values should be a two-tuple where the first element is the
associated `Spectrum1D` keyword argument, and the second element is the
unit for the file column (or `None` to take unit from the table)::
column_mapping = {'FLUX': ('flux', 'Jy'),
'WAVE': ('spectral_axis', 'um')}
wcs : :class:`~astropy.wcs.WCS` or :class:`gwcs.WCS`
WCS object passed to the Spectrum1D initializer.
"""
spec_kwargs = {}
# Associate columns of the file with the appropriate spectrum1d arguments
for col_name, (kwarg_name, cm_unit) in column_mapping.items():
# If the table object couldn't parse any unit information,
# fallback to the column mapper defined unit
tab_unit = table[col_name].unit
if tab_unit and cm_unit is not None:
# If the table unit is defined, retrieve the quantity array for
# the column
kwarg_val = u.Quantity(table[col_name], tab_unit)
# Attempt to convert the table unit to the user-defined unit.
logging.debug("Attempting auto-convert of table unit '%s' to "
"user-provided unit '%s'.", tab_unit, cm_unit)
if not isinstance(cm_unit, u.Unit):
cm_unit = u.Unit(cm_unit)
if cm_unit.physical_type in ('length', 'frequency'):
# Spectral axis column information
kwarg_val = kwarg_val.to(cm_unit, equivalencies=u.spectral())
elif 'spectral flux' in cm_unit.physical_type:
# Flux/error column information
kwarg_val = kwarg_val.to(
cm_unit, equivalencies=u.spectral_density(1 * u.AA))
elif tab_unit:
# The user has provided no unit in the column mapping, so we
# use the unit as defined in the table object.
kwarg_val = u.Quantity(table[col_name], tab_unit)
elif cm_unit is not None:
# In this case, the user has defined a unit in the column mapping
# but no unit has been defined in the table object.
kwarg_val = u.Quantity(table[col_name], cm_unit)
else:
# Neither the column mapping nor the table contain unit information.
# This may be desired e.g. for the mask or bit flag arrays.
kwarg_val = table[col_name]
spec_kwargs.setdefault(kwarg_name, kwarg_val)
# Ensure that the uncertainties are a subclass of NDUncertainty
if spec_kwargs.get('uncertainty') is not None:
spec_kwargs['uncertainty'] = StdDevUncertainty(
spec_kwargs.get('uncertainty'))
return Spectrum1D(**spec_kwargs, wcs=wcs, meta=table.meta)
|
def spectrum_from_column_mapping(table, column_mapping, wcs=None):
"""
Given a table and a mapping of the table column names to attributes
on the Spectrum1D object, parse the information into a Spectrum1D.
Parameters
----------
table : :class:`~astropy.table.Table`
The table object (e.g. returned from `Table.read('data_file')`).
column_mapping : dict
A dictionary describing the relation between the table columns
and the arguments of the `Spectrum1D` class, along with unit
information. The dictionary keys should be the table column names
while the values should be a two-tuple where the first element is the
associated `Spectrum1D` keyword argument, and the second element is the
unit for the file column (or ``None`` to take unit from the table)::
column_mapping = {'FLUX': ('flux', 'Jy'),
'WAVE': ('spectral_axis', 'um')}
wcs : :class:`~astropy.wcs.WCS` or :class:`gwcs.WCS`
WCS object passed to the Spectrum1D initializer.
"""
spec_kwargs = {}
# Associate columns of the file with the appropriate spectrum1d arguments
for col_name, (kwarg_name, cm_unit) in column_mapping.items():
# If the table object couldn't parse any unit information,
# fallback to the column mapper defined unit
tab_unit = table[col_name].unit
if tab_unit and cm_unit is not None:
# If the table unit is defined, retrieve the quantity array for
# the column
kwarg_val = u.Quantity(table[col_name], tab_unit)
# Attempt to convert the table unit to the user-defined unit.
logging.debug("Attempting auto-convert of table unit '%s' to "
"user-provided unit '%s'.", tab_unit, cm_unit)
if not isinstance(cm_unit, u.Unit):
cm_unit = u.Unit(cm_unit)
if cm_unit.physical_type in ('length', 'frequency'):
# Spectral axis column information
kwarg_val = kwarg_val.to(cm_unit, equivalencies=u.spectral())
elif 'spectral flux' in cm_unit.physical_type:
# Flux/error column information
kwarg_val = kwarg_val.to(
cm_unit, equivalencies=u.spectral_density(1 * u.AA))
elif tab_unit:
# The user has provided no unit in the column mapping, so we
# use the unit as defined in the table object.
kwarg_val = u.Quantity(table[col_name], tab_unit)
elif cm_unit is not None:
# In this case, the user has defined a unit in the column mapping
# but no unit has been defined in the table object.
kwarg_val = u.Quantity(table[col_name], cm_unit)
else:
# Neither the column mapping nor the table contain unit information.
# This may be desired e.g. for the mask or bit flag arrays.
kwarg_val = table[col_name]
spec_kwargs.setdefault(kwarg_name, kwarg_val)
# Ensure that the uncertainties are a subclass of NDUncertainty
if spec_kwargs.get('uncertainty') is not None:
spec_kwargs['uncertainty'] = StdDevUncertainty(
spec_kwargs.get('uncertainty'))
return Spectrum1D(**spec_kwargs, wcs=wcs, meta=table.meta)
|
42,990 |
def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate="BSgate"):
"""Applies a two-mode gate to a state
Applies the two-mode gate to the state using custom tensor contractions and
the numba compiler for faster application.
Args:
mat (ndarray): The BS operator to be applied to the state
state (ndarray): The state that the BS is applied to
pure (bool): If the state is pure or mixed
modes (list[int]): A list of modes to which the BS is applied
n (int): The total number of modes
trunc (int): The Hilbert space truncation/cutoff
gate (str): the gate which should be called (BSgate, S2gate)
Returns:
ndarray: State where the two-mode operation has been applied
"""
if pure:
t1 = modes[0]
t2 = modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(n)
switch_list_2 = np.arange(n)
switch_list_1[[0, t1]] = switch_list_1[[t1, 0]]
switch_list_2[[1, t2]] = switch_list_2[[t2, 1]]
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(switch_list_1)
else:
t1 = 2 * modes[0]
t2 = 2 * modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(2 * n)
switch_list_2 = np.arange(2 * n)
switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]]
switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]]
# put bra-values to the left, and ket-values to the right (ignoring values not operated on)
transpose_list = np.arange(2 * n)
transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]]
state = state.transpose(transpose_list)
state = state.transpose(switch_list_1)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_BS(mat.conj(), state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_S2(mat.conj(), state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(transpose_list)
return ret
|
def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate="BSgate"):
"""Applies a two-mode gate to a state
Applies the two-mode gate to the state using custom tensor contractions and
the numba compiler for faster application.
Args:
mat (ndarray): The BS operator to be applied to the state
state (ndarray): The state that the BS is applied to
pure (bool): If the state is pure or mixed
modes (list[int]): the list of modes to which the operator is applied on
n (int): The total number of modes
trunc (int): The Hilbert space truncation/cutoff
gate (str): the gate which should be called (BSgate, S2gate)
Returns:
ndarray: State where the two-mode operation has been applied
"""
if pure:
t1 = modes[0]
t2 = modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(n)
switch_list_2 = np.arange(n)
switch_list_1[[0, t1]] = switch_list_1[[t1, 0]]
switch_list_2[[1, t2]] = switch_list_2[[t2, 1]]
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(switch_list_1)
else:
t1 = 2 * modes[0]
t2 = 2 * modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(2 * n)
switch_list_2 = np.arange(2 * n)
switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]]
switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]]
# put bra-values to the left, and ket-values to the right (ignoring values not operated on)
transpose_list = np.arange(2 * n)
transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]]
state = state.transpose(transpose_list)
state = state.transpose(switch_list_1)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_BS(mat.conj(), state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_S2(mat.conj(), state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(transpose_list)
return ret
|
52,784 |
def load_plugins(plugins_dict, plugin_source, extra_kwds={}, plugin_type_keys=['type']):
source_type, source = plugin_source
if source_type == "xml":
return __load_plugins_from_element(plugins_dict, source, extra_kwds)
else:
return __load_plugins_from_dicts(plugins_dict, source, extra_kwds, plugin_type_keys=plugin_type_keys)
|
def load_plugins(plugins_dict, plugin_source, extra_kwds=None, plugin_type_keys=('type',)):
if extra_kwds is None:
extra_kwds = {}
source_type, source = plugin_source
if source_type == "xml":
return __load_plugins_from_element(plugins_dict, source, extra_kwds)
else:
return __load_plugins_from_dicts(plugins_dict, source, extra_kwds, plugin_type_keys=plugin_type_keys)
|
53,271 |
def get_paschen_constants (gas,electrode):
r"""
Function to get the constants A and B and the second Townsend coefficient to calculate the Paschen breakdown voltage
Parameters
----------
gas : 'str'
electrode : 'str'
String representing the gas and electrode material
Return
------
dictionary containing the constants A, B and townsend_gamma for calculation of the breakdwn voltage
References
---------
Paschen_constants contains the coefficents A and B for the estimation of the
First Townsend Ionization Coefficent
(exponential fit to the First Townsend Ionization coefficient)
as adapted from
E.Nasser, Fundamentals of Gaseous Ionization and Plasma Electronics,
Wiley-Interscience, New York 1971
format: paschen_constants dir {"gas":[A,B]}
units: A in [Ionisation/(Pa m)] and B in [V/(Pa m)]
Townsend_gamma is the Second Townsend Ionization coefficient as given by
A.Beroual and I. Fonfana, Discharge in Long Air Gap Modeling and Application
IOP Publishing Ltd 2016
ISBN 978-0-7503-1236-3 (ebook)
ISBN 978-0-7503-1237-0 (print)
Examples
--------
c=def get_paschen_constants ("Ar","Ni):
c={'A': 11, 'B': 135, 'gam': 0.058}
c=def get_paschen_constants ("Ar","zz"):
c={'A': 11, 'B': 135, 'gam': 0.01}
If electrode material is not found a default value of 0.01 is taken
c=def get_paschen_constants ("Zz","Ni"):
c=None
If gas is not found, c is set to None
"""
# Supported gases
gases=["Air","N2","H2","He","Ne","Ar","Kr","Xe"]
paschen_constants={"Air":[11,274],
"N2":[9.0, 257],
"H2":[3.8,104],
"He":[2.3,26],
"Ne":[3.0, 75],
"Ar":[11,135],
"Kr":[13,180],
"Xe":[20,263]}
# Supported electrode materials
materials=["Al","Cu","Ni","Pt","C","W","Fe"]
townsend_gamma={"Air":{"Al":0.035,"Cu":0.025,"Ni":0.036,"Pt":0.017,"C":None,"W":None,"Fe":0.02},
"N2":{"Al":0.1,"Cu":0.066,"Ni":0.077,"Pt":0.59,"C":None,"W":None,"Fe":0.059},
"H2":{"Al":0.095,"Cu":0.05,"Ni":0.053,"Pt":0.02,"C":0.014,"W":None,"Fe":0.061},
"He":{"Al":0.021,"Cu":None,"Ni":0.015,"Pt":0.01,"C":None,"W":None,"Fe":0.015},
"Ne":{"Al":0.053,"Cu":0.02,"Ni":0.031,"Pt":0.023,"C":None,"W":0.045,"Fe":0.022},
"Ar":{"Al":0.12,"Cu":0.058,"Ni":0.058,"Pt":0.058,"C":None,"W":None,"Fe":0.058},
"Kr":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None},
"Xe":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None}}
# Check if the asked gas and electrode material is supported
resg= gas in gases
rese=electrode in materials
# If the gas is supported get the constants A and B
print(resg,rese)
if resg==True :
print(gas)
A=paschen_constants[gas][0]
B=paschen_constants[gas][1]
print(A,B)
# Get the townsend_gamma coefficient for the the gas/electrode combination
if rese==True:
gam=townsend_gamma[gas]
print(gam)
gn=gam[electrode]
print (gn)
# Test if townsend_gamma exists for the demanded gas/electrode configuration
# If not a default townsend_gamma value of 0.01 is taken
if gn is None:
gn=0.01
print("default")
print(gn)
else:
# If the electrode material is not supportes set townsend_gamma to default = 0.01
gn=0.01
print("default")
# Create output dir {const}
const={"A":A,"B":B,"gam":gn}
print(const)
return const
# If gas is not supported set const=None
else :
const=None
print("No constants for this gas available",const)
return const
|
def get_paschen_constants (gas,electrode):
r"""
Function to get the constants A and B and the second Townsend coefficient to calculate the Paschen breakdown voltage
Parameters
----------
gas : 'str'
electrode : 'str'
String representing the gas and electrode material
Return
------
dictionary containing the constants A, B and townsend_gamma for calculation of the breakdwn voltage
References
---------
Paschen_constants contains the coefficents A and B for the estimation of the
First Townsend Ionization Coefficent
(exponential fit to the First Townsend Ionization coefficient)
as adapted from
E.Nasser, Fundamentals of Gaseous Ionization and Plasma Electronics,
Wiley-Interscience, New York 1971
format: paschen_constants dir {"gas":[A,B]}
units: A in [Ionisation/(Pa m)] and B in [V/(Pa m)]
Townsend_gamma is the Second Townsend Ionization coefficient as given by
A.Beroual and I. Fonfana, Discharge in Long Air Gap Modeling and Application
IOP Publishing Ltd 2016
ISBN 978-0-7503-1236-3 (ebook)
ISBN 978-0-7503-1237-0 (print)
Examples
--------
>>> get_paschen_constants("Ar", "Ni)
{'A': 11, 'B': 135, 'gam': 0.058}
If the electrode material is not found, then a default value of 0.01 is taken.
>>> get_paschen_constants("Ar", "zz")
{'A': 11, 'B': 135, 'gam': 0.01}
If ``gas`` is not found, ``c`` is set to `None`.
>>> get_paschen_constants("Zz", "Ni")
None
"""
# Supported gases
gases=["Air","N2","H2","He","Ne","Ar","Kr","Xe"]
paschen_constants={"Air":[11,274],
"N2":[9.0, 257],
"H2":[3.8,104],
"He":[2.3,26],
"Ne":[3.0, 75],
"Ar":[11,135],
"Kr":[13,180],
"Xe":[20,263]}
# Supported electrode materials
materials=["Al","Cu","Ni","Pt","C","W","Fe"]
townsend_gamma={"Air":{"Al":0.035,"Cu":0.025,"Ni":0.036,"Pt":0.017,"C":None,"W":None,"Fe":0.02},
"N2":{"Al":0.1,"Cu":0.066,"Ni":0.077,"Pt":0.59,"C":None,"W":None,"Fe":0.059},
"H2":{"Al":0.095,"Cu":0.05,"Ni":0.053,"Pt":0.02,"C":0.014,"W":None,"Fe":0.061},
"He":{"Al":0.021,"Cu":None,"Ni":0.015,"Pt":0.01,"C":None,"W":None,"Fe":0.015},
"Ne":{"Al":0.053,"Cu":0.02,"Ni":0.031,"Pt":0.023,"C":None,"W":0.045,"Fe":0.022},
"Ar":{"Al":0.12,"Cu":0.058,"Ni":0.058,"Pt":0.058,"C":None,"W":None,"Fe":0.058},
"Kr":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None},
"Xe":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None}}
# Check if the asked gas and electrode material is supported
resg= gas in gases
rese=electrode in materials
# If the gas is supported get the constants A and B
print(resg,rese)
if resg==True :
print(gas)
A=paschen_constants[gas][0]
B=paschen_constants[gas][1]
print(A,B)
# Get the townsend_gamma coefficient for the the gas/electrode combination
if rese==True:
gam=townsend_gamma[gas]
print(gam)
gn=gam[electrode]
print (gn)
# Test if townsend_gamma exists for the demanded gas/electrode configuration
# If not a default townsend_gamma value of 0.01 is taken
if gn is None:
gn=0.01
print("default")
print(gn)
else:
# If the electrode material is not supportes set townsend_gamma to default = 0.01
gn=0.01
print("default")
# Create output dir {const}
const={"A":A,"B":B,"gam":gn}
print(const)
return const
# If gas is not supported set const=None
else :
const=None
print("No constants for this gas available",const)
return const
|
30,362 |
def long_running_loop():
"""
Runs in a long running container - checking for newly mirrored investigations and answered questions.
"""
while True:
error = ''
try:
check_for_mirrors()
check_for_answers()
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout,
requests.exceptions.TooManyRedirects,
requests.exceptions.RequestException, requests.exceptions.SSLError) as e:
error = 'Could not connect to the Slack endpoint: {}'.format(str(e))
except Exception as e:
error = 'An error occurred: {}'.format(str(e))
demisto.error(error)
finally:
if error:
demisto.updateModuleHealth(error)
time.sleep(1)
|
def long_running_loop():
"""
Runs in a long running container - checking for newly mirrored investigations and answered questions.
"""
while True:
error = ''
try:
check_for_mirrors()
check_for_answers()
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout,
requests.exceptions.TooManyRedirects,
requests.exceptions.RequestException, requests.exceptions.SSLError) as e:
error = f'Could not connect to the Slack endpoint: {str(e)}.'
except Exception as e:
error = 'An error occurred: {}'.format(str(e))
demisto.error(error)
finally:
if error:
demisto.updateModuleHealth(error)
time.sleep(1)
|
55,049 |
def out_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the Hamiltonian which imposes the constraint that each node has
an outflow of at most one.
The out flow constraint is, for all :math:`i`:
.. math:: \sum_{j,(i,j)\in E}x_{ij} \leq 1,
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
\left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right)
where :math:`V` are the graph vertices and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has an outflow of at most one.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the out flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_out_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
def out_flow_constraint(graph: nx.DiGraph) -> qml.Hamiltonian:
r"""Calculates the Hamiltonian which imposes the constraint that each node has
an outflow of at most one.
Given a subset of edges in a directed graph, the out-flow constraint imposes that at most one
edge can leave any given node, i.e., for all :math:`i`:
.. math:: \sum_{j,(i,j)\in E}x_{ij} \leq 1,
where :math:`E` are the edges of the graph and :math:`x_{ij}` is a binary number that selects
whether to include the edge :math:`(i, j)`.
The corresponding qubit Hamiltonian is:
.. math::
\frac{1}{4}\sum_{i\in V}\left(d_{i}^{out}(d_{i}^{out} - 2)\mathbb{I}
- 2(d_{i}^{out}-1)\sum_{j,(i,j)\in E}\hat{Z}_{ij} +
\left( \sum_{j,(i,j)\in E}\hat{Z}_{ij} \right)^{2}\right)
where :math:`V` are the graph vertices and :math:`Z_{ij}` is a qubit Pauli-Z matrix acting
upon the qubit specified by the pair :math:`(i, j)`. Note that this function omits the
:math:`1/4` constant factor.
This Hamiltonian is minimized by selecting edges such that each node has an outflow of at most one.
Args:
graph (nx.DiGraph): the graph specifying possible edges
Returns:
qml.Hamiltonian: the out flow constraint Hamiltonian
Raises:
ValueError: if the input graph is not directed
"""
if not hasattr(graph, "out_edges"):
raise ValueError("Input graph must be directed")
hamiltonian = qml.Hamiltonian([], [])
for node in graph.nodes:
hamiltonian += _inner_out_flow_constraint_hamiltonian(graph, node)
return hamiltonian
|
9,501 |
def main():
# Setup the Ansible module
module = AnsibleModule(
argument_spec=dict(
channel=dict(required=True, type='str'),
property=dict(required=True, type='str'),
value_type=dict(required=False, type='list'),
value=dict(required=False, type='list'),
state=dict(default='present',
choices=['present', 'get', 'absent'],
type='str'),
force_array=dict(default=False, type='bool')
),
supports_check_mode=True
)
state_values = {"present": "set", "absent": "unset", "get": "get"}
# Assign module values to dictionary values
channel = module.params['channel']
property = module.params['property']
values = module.params['value']
value_types = module.params['value_type']
if values is not None and value_types is not None:
if len(values) != len(value_types):
module.fail_json(msg='Same number of "value" and "value_type" needed')
for i in range(len(values)):
if values[i].lower() == "true" or values[i].lower() == "false":
values[i] = values[i].lower()
for value_type in value_types:
if value_type not in ['int', 'bool', 'float', 'string']:
module.fail_json(msg='value_type %s is not supported'
% str(value_type))
else:
values = value_types = None
array = module.params['force_array'] or values is not None and len(values) > 1
state = state_values[module.params['state']]
# Initialize some variables for later
change = False
new_values = ''
if state != "get":
if values is None or values[0] == "":
module.fail_json(msg='State %s requires "value" to be set'
% str(state))
elif value_types is None or value_types[0] == "":
module.fail_json(msg='State %s requires "value_type" to be set'
% str(state))
# Create a Xfconf preference
xfconf = XfconfPreference(module,
channel,
property,
value_types,
values,
array)
# Now we get the current values, if not found don't fail
dummy, current_values = xfconf.call("get", fail_onerr=False)
# Convert current_values to array format
if "Value is an array with" in current_values:
current_values = current_values.split("\n")
current_values.pop(0)
current_values.pop(0)
else:
current_values = [current_values]
# Check if the current values equals the values we want to set. If not,
# make a change
if current_values != values and state != "get":
# If check mode, we know a change would have occurred.
if module.check_mode:
# So we will set the change to True
change = True
# And set the new_values to the values that would have been set
new_values = values
# If not check mode make the change.
else:
change, new_values = xfconf.call(state)
# If the value we want to set is the same as the current_values, we will
# set the new_values to the current_values for reporting
else:
new_values = current_values
facts = dict(xfconf={'changed': change,
'channel': channel,
'property': property,
'value_type': value_types,
'new_value': new_values,
'previous_value': current_values,
'playbook_value': values})
module.exit_json(changed=change, ansible_facts=facts)
|
def main():
# Setup the Ansible module
module = AnsibleModule(
argument_spec=dict(
channel=dict(required=True, type='str'),
property=dict(required=True, type='str'),
value_type=dict(required=False, type='list'),
value=dict(required=False, type='list'),
state=dict(default='present',
choices=['present', 'get', 'absent'],
type='str'),
force_array=dict(default=False, type='bool')
),
supports_check_mode=True
)
state_values = {"present": "set", "absent": "unset", "get": "get"}
# Assign module values to dictionary values
channel = module.params['channel']
property = module.params['property']
values = module.params['value']
value_types = module.params['value_type']
if values is not None and value_types is not None:
if len(values) != len(value_types):
module.fail_json(msg='Same number of "value" and "value_type" needed')
for i in range(len(values)):
if values[i].lower() == "true" or values[i].lower() == "false":
values[i] = values[i].lower()
for value_type in value_types:
if value_type not in ['int', 'bool', 'float', 'string']:
module.fail_json(msg='value_type %s is not supported'
% str(value_type))
else:
values = value_types = None
array = module.params['force_array'] or values is not None and len(values) > 1
state = state_values[module.params['state']]
# Initialize some variables for later
change = False
new_values = ''
if state != "get":
if values is None or values[0] == "":
module.fail_json(msg='State %s requires "value" to be set'
% str(state))
elif value_types is None or value_types[0] == "":
module.fail_json(msg='State %s requires "value_type" to be set'
% str(state))
# Create a Xfconf preference
xfconf = XfconfPreference(module,
channel,
property,
value_types,
values,
array)
# Now we get the current values, if not found don't fail
dummy, current_values = xfconf.call("get", fail_onerr=False)
# Convert current_values to array format
if "Value is an array with" in current_values:
current_values = current_values.split("\n")[2:]
current_values.pop(0)
current_values.pop(0)
else:
current_values = [current_values]
# Check if the current values equals the values we want to set. If not,
# make a change
if current_values != values and state != "get":
# If check mode, we know a change would have occurred.
if module.check_mode:
# So we will set the change to True
change = True
# And set the new_values to the values that would have been set
new_values = values
# If not check mode make the change.
else:
change, new_values = xfconf.call(state)
# If the value we want to set is the same as the current_values, we will
# set the new_values to the current_values for reporting
else:
new_values = current_values
facts = dict(xfconf={'changed': change,
'channel': channel,
'property': property,
'value_type': value_types,
'new_value': new_values,
'previous_value': current_values,
'playbook_value': values})
module.exit_json(changed=change, ansible_facts=facts)
|
45,785 |
def compute_intensity_transformation3d(input: torch.Tensor, params: Dict[str, torch.Tensor]):
r"""Compute the applied transformation matrix :math: `(*, 4, 4)`.
Args:
input (torch.Tensor): Tensor to be transformed with shape :math:`(D, H, W)`, :math:`(C, D, H, W)`,
:math:`(*, C, D, H, W)`.
params (Dict[str, torch.Tensor]):
- params['batch_prob']: A boolean tensor that indicating whether if to transform an image in a batch.
Returns:
torch.Tensor: The applied transformation matrix :math: `(*, 4, 4)` Returns identity transformations.
"""
input = _transform_input3d(input)
_validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
identity: torch.Tensor = torch.eye(4, device=input.device, dtype=input.dtype).repeat(input.shape[0], 1, 1)
return identity
|
def compute_intensity_transformation3d(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:
r"""Compute the applied transformation matrix :math: `(*, 4, 4)`.
Args:
input (torch.Tensor): Tensor to be transformed with shape :math:`(D, H, W)`, :math:`(C, D, H, W)`,
:math:`(*, C, D, H, W)`.
params (Dict[str, torch.Tensor]):
- params['batch_prob']: A boolean tensor that indicating whether if to transform an image in a batch.
Returns:
torch.Tensor: The applied transformation matrix :math: `(*, 4, 4)` Returns identity transformations.
"""
input = _transform_input3d(input)
_validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
identity: torch.Tensor = torch.eye(4, device=input.device, dtype=input.dtype).repeat(input.shape[0], 1, 1)
return identity
|
35,520 |
def modeld_lagging_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return NormalPermanentAlert("Driving model lagging", f"{round(sm['modelV2'].frameDropPerc, 1)}% frames dropped")
|
def modeld_lagging_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return NormalPermanentAlert("Driving model lagging", f"{sm['modelV2'].frameDropPerc:.1f}% frames dropped")
|
44,966 |
def multiprocessing_safe_run_and_retrieve(
queue: multiprocessing.Queue,
payload: bytes,
) -> None:
"""
Gets the return value from a function and puts it in a multiprocessing-safe
container. Helper function for `run_with_multiprocess_timeout`, must be defined
top-level so it can be pickled and sent to `multiprocessing.Process`
Passing the payload serialized allows us to escape the limitations of the python
native pickler which will fail on tasks defined in scripts because of name
mismatches. Whilst this particular example only affects the `func` arg, any of the
others could be affected by other pickle limitations as well.
Args:
- queue (multiprocessing.Queue): The queue to pass the resulting payload to
- payload (bytes): A serialized dictionary containing the data required to run
the function. Should be serialized with `cloudpickle.dumps`
Expects the following keys:
- fn (Callable): The function to call
- args (list): Positional argument values to call the function with
- kwargs (Mapping): Keyword arguments to call the function with
- context (dict): The prefect context dictionary to use during execution
- name (str): an optional name to attach to logs for this function run,
defaults to the name of the given function. Provides an interface for
passing task names for logs.
- logger (Logger): the logger to use
"""
request = cloudpickle.loads(payload)
fn: Callable = request["fn"]
context: dict = request.get("context", {})
args: Sequence = request.get("args", [])
kwargs: Mapping = request.get("kwargs", {})
name: str = request.get("name", f"Function '{fn.__name__}'")
logger: Logger = request.get("logger") or get_logger()
try:
with prefect.context(context):
logger.debug(f"{name}: Executing...")
return_val = fn(*args, **kwargs)
logger.debug(f"{name}: Execution successful.")
except Exception as exc:
return_val = exc
logger.debug(
f"{name}: Encountered a {type(exc).__name__}, "
f"returning details as a result..."
)
try:
pickled_val = cloudpickle.dumps(return_val)
except Exception as exc:
base_msg = (
f"Failed to pickle result of type {type(return_val).__name__!r} with "
f'exception: "{type(exc).__name__}: {str(exc)}".'
)
logger.error(f"{name}: {base_msg}")
pickled_val = cloudpickle.dumps(
RuntimeError(
f"{base_msg} This timeout handler requires your function return "
f"value to be serializable with `cloudpickle`. "
"If you must return a unserializable value, consider switching your "
"executor to use processes instead of threads to use a different "
"timeout handler."
)
)
logger.debug(f"{name}: Passing result back to main process...")
try:
queue.put(pickled_val)
except Exception:
logger.error(
f"{name}: Failed to put result in queue to main process!",
exc_info=True,
)
raise
|
def multiprocessing_safe_run_and_retrieve(
queue: multiprocessing.Queue,
payload: bytes,
) -> None:
"""
Gets the return value from a function and puts it in a multiprocessing-safe
container. Helper function for `run_with_multiprocess_timeout`, must be defined
top-level so it can be pickled and sent to `multiprocessing.Process`
Passing the payload serialized allows us to escape the limitations of the python
native pickler which will fail on tasks defined in scripts because of name
mismatches. Whilst this particular example only affects the `func` arg, any of the
others could be affected by other pickle limitations as well.
Args:
- queue (multiprocessing.Queue): The queue to pass the resulting payload to
- payload (bytes): A serialized dictionary containing the data required to run
the function. Should be serialized with `cloudpickle.dumps`
Expects the following keys:
- fn (Callable): The function to call
- args (list): Positional argument values to call the function with
- kwargs (Mapping): Keyword arguments to call the function with
- context (dict): The prefect context dictionary to use during execution
- name (str): an optional name to attach to logs for this function run,
defaults to the name of the given function. Provides an interface for
passing task names for logs.
- logger (Logger): the logger to use
"""
request = cloudpickle.loads(payload)
fn: Callable = request["fn"]
context: dict = request.get("context", {})
args: Sequence = request.get("args", [])
kwargs: Mapping = request.get("kwargs", {})
name: str = request.get("name", f"Function '{fn.__name__}'")
logger: Logger = request.get("logger") or get_logger()
try:
with prefect.context(context):
logger.debug(f"{name}: Executing...")
return_val = fn(*args, **kwargs)
logger.debug(f"{name}: Execution successful.")
except Exception as exc:
return_val = exc
logger.debug(
f"{name}: Encountered a {type(exc).__name__}, "
f"returning details as a result..."
)
try:
pickled_val = cloudpickle.dumps(return_val)
except Exception as exc:
base_msg = (
f"Failed to pickle result of type {type(return_val).__name__!r} with "
f'exception: "{type(exc).__name__}: {str(exc)}".'
)
logger.error(f"{name}: {base_msg}", exc_info=True)
pickled_val = cloudpickle.dumps(
RuntimeError(
f"{base_msg} This timeout handler requires your function return "
f"value to be serializable with `cloudpickle`. "
"If you must return a unserializable value, consider switching your "
"executor to use processes instead of threads to use a different "
"timeout handler."
)
)
logger.debug(f"{name}: Passing result back to main process...")
try:
queue.put(pickled_val)
except Exception:
logger.error(
f"{name}: Failed to put result in queue to main process!",
exc_info=True,
)
raise
|
40,731 |
def setup_common_training_handlers(
trainer: Engine,
train_sampler: Optional[DistributedSampler] = None,
to_save: Optional[Mapping] = None,
save_every_iters: int = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: bool = True,
with_pbar_on_iters: bool = True,
log_every_iters: int = 100,
stop_on_nan: bool = True,
clear_cuda_cache: bool = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:
"""Helper method to setup trainer with common handlers (it also supports distributed configuration):
- :class:`~ignite.handlers.TerminateOnNan`
- handler to setup learning rate scheduling
- :class:`~ignite.handlers.ModelCheckpoint`
- :class:`~ignite.metrics.RunningAverage` on `update_function` output
- Two progress bars on epochs and optionally on iterations
Args:
trainer: trainer engine. Output of trainer's `update_function` should be a dictionary
or sequence or a single tensor.
train_sampler: Optional distributed sampler used to call
`set_epoch` method on epoch started event.
to_save: dictionary with objects to save in the checkpoint. This argument is passed to
:class:`~ignite.handlers.Checkpoint` instance.
save_every_iters: saving interval. By default, `to_save` objects are stored
each 1000 iterations.
output_path: output path to indicate where `to_save` objects are stored. Argument is mutually
exclusive with ``save_handler``.
lr_scheduler: learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
with_gpu_stats: if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names: list of names associated with `update_function` output dictionary.
with_pbars: if True, two progress bars on epochs and optionally on iterations are attached.
Default, True.
with_pbar_on_iters: if True, a progress bar on iterations is attached to the trainer.
Default, True.
log_every_iters: logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for
epoch-wise progress bar. Default, 100.
stop_on_nan: if True, :class:`~ignite.handlers.TerminateOnNan` handler is added to the trainer.
Default, True.
clear_cuda_cache: if True, `torch.cuda.empty_cache()` is called every end of epoch.
Default, True.
save_handler: Method or callable
class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
Argument is mutually exclusive with ``output_path``.
kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.checkpoint.Checkpoint`.
"""
if idist.get_world_size() > 1:
_setup_common_distrib_training_handlers(
trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
else:
if train_sampler is not None and isinstance(train_sampler, DistributedSampler):
warnings.warn(
"Argument train_sampler is a distributed sampler,"
" but either there is no distributed setting or world size is < 2. "
"Train sampler argument will be ignored",
UserWarning,
)
_setup_common_training_handlers(
trainer,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
|
def setup_common_training_handlers(
trainer: Engine,
train_sampler: Optional[DistributedSampler] = None,
to_save: Optional[Mapping] = None,
save_every_iters: int = 1000,
output_path: Optional[str] = None,
lr_scheduler: Optional[Union[ParamScheduler, _LRScheduler]] = None,
with_gpu_stats: bool = False,
output_names: Optional[Iterable[str]] = None,
with_pbars: bool = True,
with_pbar_on_iters: bool = True,
log_every_iters: int = 100,
stop_on_nan: bool = True,
clear_cuda_cache: bool = True,
save_handler: Optional[Union[Callable, BaseSaveHandler]] = None,
**kwargs: Any,
) -> None:
"""Helper method to setup trainer with common handlers (it also supports distributed configuration):
- :class:`~ignite.handlers.TerminateOnNan`
- handler to setup learning rate scheduling
- :class:`~ignite.handlers.ModelCheckpoint`
- :class:`~ignite.metrics.RunningAverage` on `update_function` output
- Two progress bars on epochs and optionally on iterations
Args:
trainer: trainer engine. Output of trainer's `update_function` should be a dictionary
or sequence or a single tensor.
train_sampler: Optional distributed sampler used to call
`set_epoch` method on epoch started event.
to_save: dictionary with objects to save in the checkpoint. This argument is passed to
:class:`~ignite.handlers.Checkpoint` instance.
save_every_iters: saving interval. By default, `to_save` objects are stored
each 1000 iterations.
output_path: output path to indicate where `to_save` objects are stored. Argument is mutually
exclusive with ``save_handler``.
lr_scheduler: learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
with_gpu_stats: if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names: list of names associated with `update_function` output dictionary.
with_pbars: if True, two progress bars on epochs and optionally on iterations are attached.
Default, True.
with_pbar_on_iters: if True, a progress bar on iterations is attached to the trainer.
Default, True.
log_every_iters: logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for
epoch-wise progress bar. Default, 100.
stop_on_nan: if True, :class:`~ignite.handlers.TerminateOnNan` handler is added to the trainer.
Default, True.
clear_cuda_cache: if True, `torch.cuda.empty_cache()` is called every end of epoch.
Default, True.
save_handler: Method or callable
class to use to store ``to_save``. See :class:`~ignite.handlers.checkpoint.Checkpoint` for more details.
Argument is mutually exclusive with ``output_path``.
kwargs: optional keyword args to be passed to construct :class:`~ignite.handlers.Checkpoint`.
"""
if idist.get_world_size() > 1:
_setup_common_distrib_training_handlers(
trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
else:
if train_sampler is not None and isinstance(train_sampler, DistributedSampler):
warnings.warn(
"Argument train_sampler is a distributed sampler,"
" but either there is no distributed setting or world size is < 2. "
"Train sampler argument will be ignored",
UserWarning,
)
_setup_common_training_handlers(
trainer,
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
stop_on_nan=stop_on_nan,
clear_cuda_cache=clear_cuda_cache,
save_handler=save_handler,
**kwargs,
)
|
13,087 |
def attribute_is_list(attr):
"""
Checks if attribute denotes a list, and returns the regular expression if so
:param attr: attr or attr[index]
:return: attr, re or None
"""
list_index_update = re.match('(.+)\\[([0-9]+)\\]', attr)
if list_index_update:
attr = list_index_update.group(1)
return attr, list_index_update.group(2) if list_index_update else None
|
def attribute_is_list(attr):
"""
Checks if attribute denotes a list, and returns the name of the list and the given list index if so
:param attr: attr or attr[index]
:return: attr, re or None
"""
list_index_update = re.match('(.+)\\[([0-9]+)\\]', attr)
if list_index_update:
attr = list_index_update.group(1)
return attr, list_index_update.group(2) if list_index_update else None
|
34,123 |
def list_directory(path: Text) -> List[Text]:
"""Returns all files and folders excluding hidden files.
If the path points to a file, returns the file. This is a recursive
implementation returning files in any depth of the path."""
if not isinstance(path, str):
raise ValueError("Resourcename must be a string type. "
"Got `{}` instead".format(type(path)))
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
results = []
for base, dirs, files in os.walk(path):
# remove hidden files
goodfiles = filter(lambda x: not x.startswith('.'), files)
results.extend(os.path.join(base, f) for f in goodfiles)
return results
else:
raise ValueError("Could not locate the resource '{}'."
"".format(os.path.abspath(path)))
|
def list_directory(path: Text) -> List[Text]:
"""Returns all files and folders excluding hidden files.
If the path points to a file, returns the file. This is a recursive
implementation returning files in any depth of the path."""
if not isinstance(path, str):
raise ValueError("`resource_name` must be a string type. "
"Got `{}` instead".format(type(path)))
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
results = []
for base, dirs, files in os.walk(path):
# remove hidden files
goodfiles = filter(lambda x: not x.startswith('.'), files)
results.extend(os.path.join(base, f) for f in goodfiles)
return results
else:
raise ValueError("Could not locate the resource '{}'."
"".format(os.path.abspath(path)))
|
48,939 |
def get_placeholders(provider):
placeholders = dict(os.environ)
placeholders.setdefault('PGHOME', os.path.expanduser('~'))
placeholders.setdefault('APIPORT', '8008')
placeholders.setdefault('BACKUP_SCHEDULE', '0 1 * * *')
placeholders.setdefault('BACKUP_NUM_TO_RETAIN', '5')
placeholders.setdefault('CRONTAB', '[]')
placeholders.setdefault('PGROOT', os.path.join(placeholders['PGHOME'], 'pgroot'))
placeholders.setdefault('WALE_TMPDIR', os.path.abspath(os.path.join(placeholders['PGROOT'], '../tmp')))
placeholders.setdefault('PGDATA', os.path.join(placeholders['PGROOT'], 'pgdata'))
placeholders.setdefault('HUMAN_ROLE', 'zalandos')
placeholders.setdefault('PGUSER_STANDBY', 'standby')
placeholders.setdefault('PGPASSWORD_STANDBY', 'standby')
placeholders.setdefault('USE_ADMIN', 'PGPASSWORD_ADMIN' in placeholders)
placeholders.setdefault('PGUSER_ADMIN', 'admin')
placeholders.setdefault('PGPASSWORD_ADMIN', 'cola')
placeholders.setdefault('PGUSER_SUPERUSER', 'postgres')
placeholders.setdefault('PGPASSWORD_SUPERUSER', 'zalando')
placeholders.setdefault('ALLOW_NOSSL', '')
placeholders.setdefault('BGMON_LISTEN_IP', '0.0.0.0')
placeholders.setdefault('PGPORT', '5432')
placeholders.setdefault('SCOPE', 'dummy')
placeholders.setdefault('RW_DIR', RW_DIR)
placeholders.setdefault('SSL_TEST_RELOAD', 'SSL_PRIVATE_KEY_FILE' in os.environ)
placeholders.setdefault('SSL_CA_FILE', '')
placeholders.setdefault('SSL_CRL_FILE', '')
placeholders.setdefault('SSL_CERTIFICATE_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'server.crt'))
placeholders.setdefault('SSL_PRIVATE_KEY_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'server.key'))
placeholders.setdefault('SSL_RESTAPI_CA_FILE', '')
placeholders.setdefault('SSL_RESTAPI_CERTIFICATE_FILE', os.path.join(placeholders['RW_DIR'], 'certs',
'rest-api-server.crt'))
placeholders.setdefault('SSL_RESTAPI_PRIVATE_KEY_FILE', os.path.join(placeholders['RW_DIR'], 'certs',
'restapi-api-server.key'))
placeholders.setdefault('WALE_BACKUP_THRESHOLD_MEGABYTES', 102400)
placeholders.setdefault('WALE_BACKUP_THRESHOLD_PERCENTAGE', 30)
placeholders.setdefault('INITDB_LOCALE', 'en_US')
# if Kubernetes is defined as a DCS, derive the namespace from the POD_NAMESPACE, if not set explicitely.
# We only do this for Kubernetes DCS, as we don't want to suddently change, i.e. DCS base path when running
# in Kubernetes with Etcd in a non-default namespace
placeholders.setdefault('NAMESPACE', placeholders.get('POD_NAMESPACE', 'default')
if USE_KUBERNETES and placeholders.get('DCS_ENABLE_KUBERNETES_API') else '')
# use namespaces to set WAL bucket prefix scope naming the folder namespace-clustername for non-default namespace.
placeholders.setdefault('WAL_BUCKET_SCOPE_PREFIX', '{0}-'.format(placeholders['NAMESPACE'])
if placeholders['NAMESPACE'] not in ('default', '') else '')
placeholders.setdefault('WAL_BUCKET_SCOPE_SUFFIX', '')
placeholders.setdefault('WALE_ENV_DIR', os.path.join(placeholders['RW_DIR'], 'etc', 'wal-e.d', 'env'))
placeholders.setdefault('USE_WALE', False)
cpu_count = str(min(psutil.cpu_count(), 10))
placeholders.setdefault('WALG_DOWNLOAD_CONCURRENCY', cpu_count)
placeholders.setdefault('WALG_UPLOAD_CONCURRENCY', cpu_count)
placeholders.setdefault('PAM_OAUTH2', '')
placeholders.setdefault('CALLBACK_SCRIPT', '')
placeholders.setdefault('DCS_ENABLE_KUBERNETES_API', '')
placeholders.setdefault('KUBERNETES_ROLE_LABEL', 'spilo-role')
placeholders.setdefault('KUBERNETES_SCOPE_LABEL', 'version')
placeholders.setdefault('KUBERNETES_LABELS', KUBERNETES_DEFAULT_LABELS)
placeholders.setdefault('KUBERNETES_USE_CONFIGMAPS', '')
placeholders.setdefault('KUBERNETES_BYPASS_API_SERVICE', 'true')
placeholders.setdefault('USE_PAUSE_AT_RECOVERY_TARGET', False)
placeholders.setdefault('CLONE_METHOD', '')
placeholders.setdefault('CLONE_WITH_WALE', '')
placeholders.setdefault('CLONE_WITH_BASEBACKUP', '')
placeholders.setdefault('CLONE_TARGET_TIME', '')
placeholders.setdefault('CLONE_TARGET_INCLUSIVE', True)
placeholders.setdefault('LOG_SHIP_SCHEDULE', '1 0 * * *')
placeholders.setdefault('LOG_S3_BUCKET', '')
placeholders.setdefault('LOG_TMPDIR', os.path.abspath(os.path.join(placeholders['PGROOT'], '../tmp')))
placeholders.setdefault('LOG_BUCKET_SCOPE_SUFFIX', '')
# see comment for wal-e bucket prefix
placeholders.setdefault('LOG_BUCKET_SCOPE_PREFIX', '{0}-'.format(placeholders['NAMESPACE'])
if placeholders['NAMESPACE'] not in ('default', '') else '')
if placeholders['CLONE_METHOD'] == 'CLONE_WITH_WALE':
# modify placeholders and take care of error cases
name = set_extended_wale_placeholders(placeholders, 'CLONE_')
if name is False:
logging.warning('Cloning with WAL-E is only possible when CLONE_WALE_*_PREFIX '
'or CLONE_WALG_*_PREFIX or CLONE_WAL_*_BUCKET and CLONE_SCOPE are set.')
elif name == 'S3':
placeholders.setdefault('CLONE_USE_WALG', 'true')
elif placeholders['CLONE_METHOD'] == 'CLONE_WITH_BASEBACKUP':
clone_scope = placeholders.get('CLONE_SCOPE')
if clone_scope and placeholders.get('CLONE_HOST') \
and placeholders.get('CLONE_USER') and placeholders.get('CLONE_PASSWORD'):
placeholders['CLONE_WITH_BASEBACKUP'] = True
placeholders.setdefault('CLONE_PGPASS', os.path.join(placeholders['PGHOME'],
'.pgpass_{0}'.format(clone_scope)))
placeholders.setdefault('CLONE_PORT', 5432)
else:
logging.warning("Clone method is set to basebackup, but no 'CLONE_SCOPE' "
"or 'CLONE_HOST' or 'CLONE_USER' or 'CLONE_PASSWORD' specified")
else:
if set_extended_wale_placeholders(placeholders, 'STANDBY_') == 'S3':
placeholders.setdefault('STANDBY_USE_WALG', 'true')
placeholders.setdefault('STANDBY_WITH_WALE', '')
placeholders.setdefault('STANDBY_HOST', '')
placeholders.setdefault('STANDBY_PORT', '')
placeholders.setdefault('STANDBY_CLUSTER', placeholders['STANDBY_WITH_WALE'] or placeholders['STANDBY_HOST'])
if provider == PROVIDER_AWS and not USE_KUBERNETES:
# AWS specific callback to tag the instances with roles
placeholders['CALLBACK_SCRIPT'] = 'python3 /scripts/callback_aws.py'
if placeholders.get('EIP_ALLOCATION'):
placeholders['CALLBACK_SCRIPT'] += ' ' + placeholders['EIP_ALLOCATION']
if any(placeholders.get(n) for n in AUTO_ENABLE_WALG_RESTORE):
placeholders.setdefault('USE_WALG_RESTORE', 'true')
if placeholders.get('WALG_AZ_PREFIX'):
placeholders.setdefault('USE_WALG_BACKUP', 'true')
if all(placeholders.get(n) for n in WALG_SSH_NAMES):
placeholders.setdefault('USE_WALG_BACKUP', 'true')
set_walg_placeholders(placeholders)
placeholders['USE_WALE'] = any(placeholders.get(n) for n in AUTO_ENABLE_WALG_RESTORE +
('WAL_SWIFT_BUCKET', 'WALE_SWIFT_PREFIX', 'WAL_GCS_BUCKET',
'WAL_GS_BUCKET', 'WALE_GS_PREFIX', 'WALG_GS_PREFIX'))
if placeholders.get('WALG_BACKUP_FROM_REPLICA'):
placeholders['WALG_BACKUP_FROM_REPLICA'] = str(placeholders['WALG_BACKUP_FROM_REPLICA']).lower()
# Kubernetes requires a callback to change the labels in order to point to the new master
if USE_KUBERNETES:
if not placeholders.get('DCS_ENABLE_KUBERNETES_API'):
placeholders['CALLBACK_SCRIPT'] = 'python3 /scripts/callback_role.py'
placeholders.setdefault('postgresql', {})
placeholders['postgresql'].setdefault('parameters', {})
placeholders['WALE_BINARY'] = 'wal-g' if placeholders.get('USE_WALG_BACKUP') == 'true' else 'wal-e'
placeholders['postgresql']['parameters']['archive_command'] = \
'envdir "{WALE_ENV_DIR}" {WALE_BINARY} wal-push "%p"'.format(**placeholders) \
if placeholders['USE_WALE'] else '/bin/true'
if os.path.exists(MEMORY_LIMIT_IN_BYTES_PATH):
with open(MEMORY_LIMIT_IN_BYTES_PATH) as f:
os_memory_mb = int(f.read()) / 1048576
else:
os_memory_mb = sys.maxsize
os_memory_mb = min(os_memory_mb, os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / 1048576)
# Depending on environment we take 1/4 or 1/5 of the memory, expressed in full MB's
sb_ratio = 5 if USE_KUBERNETES else 4
placeholders['postgresql']['parameters']['shared_buffers'] = '{}MB'.format(int(os_memory_mb/sb_ratio))
# # 1 connection per 30 MB, at least 100, at most 1000
placeholders['postgresql']['parameters']['max_connections'] = min(max(100, int(os_memory_mb/30)), 1000)
placeholders['instance_data'] = get_instance_metadata(provider)
placeholders['BGMON_LISTEN_IP'] = get_listen_ip()
if 'SSL_CA' in placeholders and placeholders['SSL_CA_FILE'] == '':
placeholders['SSL_CA_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'ca.crt')
if 'SSL_CRL' in placeholders and placeholders['SSL_CRL_FILE'] == '':
placeholders['SSL_CRL_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'server.crl')
ssl_keys = ['SSL_RESTAPI_CERTIFICATE', 'SSL_RESTAPI_PRIVATE_KEY']
if not set(ssl_keys) <= set(placeholders):
placeholders['SSL_RESTAPI_CERTIFICATE_FILE'] = ''
placeholders['SSL_RESTAPI_PRIVATE_KEY_FILE'] = ''
placeholders['SSL_RESTAPI_CA_FILE'] = ''
placeholders['SSL_RESTAPI_CA'] = ''
elif 'SSL_RESTAPI_CA' in placeholders and placeholders['SSL_RESTAPI_CA_FILE'] == '':
placeholders['SSL_RESTAPI_CA_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'rest-api-ca.crt')
return placeholders
|
def get_placeholders(provider):
placeholders = dict(os.environ)
placeholders.setdefault('PGHOME', os.path.expanduser('~'))
placeholders.setdefault('APIPORT', '8008')
placeholders.setdefault('BACKUP_SCHEDULE', '0 1 * * *')
placeholders.setdefault('BACKUP_NUM_TO_RETAIN', '5')
placeholders.setdefault('CRONTAB', '[]')
placeholders.setdefault('PGROOT', os.path.join(placeholders['PGHOME'], 'pgroot'))
placeholders.setdefault('WALE_TMPDIR', os.path.abspath(os.path.join(placeholders['PGROOT'], '../tmp')))
placeholders.setdefault('PGDATA', os.path.join(placeholders['PGROOT'], 'pgdata'))
placeholders.setdefault('HUMAN_ROLE', 'zalandos')
placeholders.setdefault('PGUSER_STANDBY', 'standby')
placeholders.setdefault('PGPASSWORD_STANDBY', 'standby')
placeholders.setdefault('USE_ADMIN', 'PGPASSWORD_ADMIN' in placeholders)
placeholders.setdefault('PGUSER_ADMIN', 'admin')
placeholders.setdefault('PGPASSWORD_ADMIN', 'cola')
placeholders.setdefault('PGUSER_SUPERUSER', 'postgres')
placeholders.setdefault('PGPASSWORD_SUPERUSER', 'zalando')
placeholders.setdefault('ALLOW_NOSSL', '')
placeholders.setdefault('BGMON_LISTEN_IP', '0.0.0.0')
placeholders.setdefault('PGPORT', '5432')
placeholders.setdefault('SCOPE', 'dummy')
placeholders.setdefault('RW_DIR', RW_DIR)
placeholders.setdefault('SSL_TEST_RELOAD', 'SSL_PRIVATE_KEY_FILE' in os.environ)
placeholders.setdefault('SSL_CA_FILE', '')
placeholders.setdefault('SSL_CRL_FILE', '')
placeholders.setdefault('SSL_CERTIFICATE_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'server.crt'))
placeholders.setdefault('SSL_PRIVATE_KEY_FILE', os.path.join(placeholders['RW_DIR'], 'certs', 'server.key'))
placeholders.setdefault('SSL_RESTAPI_CA_FILE', '')
placeholders.setdefault('SSL_RESTAPI_CERTIFICATE_FILE', '')
placeholders.setdefault('SSL_RESTAPI_PRIVATE_KEY_FILE', '')
placeholders.setdefault('WALE_BACKUP_THRESHOLD_MEGABYTES', 102400)
placeholders.setdefault('WALE_BACKUP_THRESHOLD_PERCENTAGE', 30)
placeholders.setdefault('INITDB_LOCALE', 'en_US')
# if Kubernetes is defined as a DCS, derive the namespace from the POD_NAMESPACE, if not set explicitely.
# We only do this for Kubernetes DCS, as we don't want to suddently change, i.e. DCS base path when running
# in Kubernetes with Etcd in a non-default namespace
placeholders.setdefault('NAMESPACE', placeholders.get('POD_NAMESPACE', 'default')
if USE_KUBERNETES and placeholders.get('DCS_ENABLE_KUBERNETES_API') else '')
# use namespaces to set WAL bucket prefix scope naming the folder namespace-clustername for non-default namespace.
placeholders.setdefault('WAL_BUCKET_SCOPE_PREFIX', '{0}-'.format(placeholders['NAMESPACE'])
if placeholders['NAMESPACE'] not in ('default', '') else '')
placeholders.setdefault('WAL_BUCKET_SCOPE_SUFFIX', '')
placeholders.setdefault('WALE_ENV_DIR', os.path.join(placeholders['RW_DIR'], 'etc', 'wal-e.d', 'env'))
placeholders.setdefault('USE_WALE', False)
cpu_count = str(min(psutil.cpu_count(), 10))
placeholders.setdefault('WALG_DOWNLOAD_CONCURRENCY', cpu_count)
placeholders.setdefault('WALG_UPLOAD_CONCURRENCY', cpu_count)
placeholders.setdefault('PAM_OAUTH2', '')
placeholders.setdefault('CALLBACK_SCRIPT', '')
placeholders.setdefault('DCS_ENABLE_KUBERNETES_API', '')
placeholders.setdefault('KUBERNETES_ROLE_LABEL', 'spilo-role')
placeholders.setdefault('KUBERNETES_SCOPE_LABEL', 'version')
placeholders.setdefault('KUBERNETES_LABELS', KUBERNETES_DEFAULT_LABELS)
placeholders.setdefault('KUBERNETES_USE_CONFIGMAPS', '')
placeholders.setdefault('KUBERNETES_BYPASS_API_SERVICE', 'true')
placeholders.setdefault('USE_PAUSE_AT_RECOVERY_TARGET', False)
placeholders.setdefault('CLONE_METHOD', '')
placeholders.setdefault('CLONE_WITH_WALE', '')
placeholders.setdefault('CLONE_WITH_BASEBACKUP', '')
placeholders.setdefault('CLONE_TARGET_TIME', '')
placeholders.setdefault('CLONE_TARGET_INCLUSIVE', True)
placeholders.setdefault('LOG_SHIP_SCHEDULE', '1 0 * * *')
placeholders.setdefault('LOG_S3_BUCKET', '')
placeholders.setdefault('LOG_TMPDIR', os.path.abspath(os.path.join(placeholders['PGROOT'], '../tmp')))
placeholders.setdefault('LOG_BUCKET_SCOPE_SUFFIX', '')
# see comment for wal-e bucket prefix
placeholders.setdefault('LOG_BUCKET_SCOPE_PREFIX', '{0}-'.format(placeholders['NAMESPACE'])
if placeholders['NAMESPACE'] not in ('default', '') else '')
if placeholders['CLONE_METHOD'] == 'CLONE_WITH_WALE':
# modify placeholders and take care of error cases
name = set_extended_wale_placeholders(placeholders, 'CLONE_')
if name is False:
logging.warning('Cloning with WAL-E is only possible when CLONE_WALE_*_PREFIX '
'or CLONE_WALG_*_PREFIX or CLONE_WAL_*_BUCKET and CLONE_SCOPE are set.')
elif name == 'S3':
placeholders.setdefault('CLONE_USE_WALG', 'true')
elif placeholders['CLONE_METHOD'] == 'CLONE_WITH_BASEBACKUP':
clone_scope = placeholders.get('CLONE_SCOPE')
if clone_scope and placeholders.get('CLONE_HOST') \
and placeholders.get('CLONE_USER') and placeholders.get('CLONE_PASSWORD'):
placeholders['CLONE_WITH_BASEBACKUP'] = True
placeholders.setdefault('CLONE_PGPASS', os.path.join(placeholders['PGHOME'],
'.pgpass_{0}'.format(clone_scope)))
placeholders.setdefault('CLONE_PORT', 5432)
else:
logging.warning("Clone method is set to basebackup, but no 'CLONE_SCOPE' "
"or 'CLONE_HOST' or 'CLONE_USER' or 'CLONE_PASSWORD' specified")
else:
if set_extended_wale_placeholders(placeholders, 'STANDBY_') == 'S3':
placeholders.setdefault('STANDBY_USE_WALG', 'true')
placeholders.setdefault('STANDBY_WITH_WALE', '')
placeholders.setdefault('STANDBY_HOST', '')
placeholders.setdefault('STANDBY_PORT', '')
placeholders.setdefault('STANDBY_CLUSTER', placeholders['STANDBY_WITH_WALE'] or placeholders['STANDBY_HOST'])
if provider == PROVIDER_AWS and not USE_KUBERNETES:
# AWS specific callback to tag the instances with roles
placeholders['CALLBACK_SCRIPT'] = 'python3 /scripts/callback_aws.py'
if placeholders.get('EIP_ALLOCATION'):
placeholders['CALLBACK_SCRIPT'] += ' ' + placeholders['EIP_ALLOCATION']
if any(placeholders.get(n) for n in AUTO_ENABLE_WALG_RESTORE):
placeholders.setdefault('USE_WALG_RESTORE', 'true')
if placeholders.get('WALG_AZ_PREFIX'):
placeholders.setdefault('USE_WALG_BACKUP', 'true')
if all(placeholders.get(n) for n in WALG_SSH_NAMES):
placeholders.setdefault('USE_WALG_BACKUP', 'true')
set_walg_placeholders(placeholders)
placeholders['USE_WALE'] = any(placeholders.get(n) for n in AUTO_ENABLE_WALG_RESTORE +
('WAL_SWIFT_BUCKET', 'WALE_SWIFT_PREFIX', 'WAL_GCS_BUCKET',
'WAL_GS_BUCKET', 'WALE_GS_PREFIX', 'WALG_GS_PREFIX'))
if placeholders.get('WALG_BACKUP_FROM_REPLICA'):
placeholders['WALG_BACKUP_FROM_REPLICA'] = str(placeholders['WALG_BACKUP_FROM_REPLICA']).lower()
# Kubernetes requires a callback to change the labels in order to point to the new master
if USE_KUBERNETES:
if not placeholders.get('DCS_ENABLE_KUBERNETES_API'):
placeholders['CALLBACK_SCRIPT'] = 'python3 /scripts/callback_role.py'
placeholders.setdefault('postgresql', {})
placeholders['postgresql'].setdefault('parameters', {})
placeholders['WALE_BINARY'] = 'wal-g' if placeholders.get('USE_WALG_BACKUP') == 'true' else 'wal-e'
placeholders['postgresql']['parameters']['archive_command'] = \
'envdir "{WALE_ENV_DIR}" {WALE_BINARY} wal-push "%p"'.format(**placeholders) \
if placeholders['USE_WALE'] else '/bin/true'
if os.path.exists(MEMORY_LIMIT_IN_BYTES_PATH):
with open(MEMORY_LIMIT_IN_BYTES_PATH) as f:
os_memory_mb = int(f.read()) / 1048576
else:
os_memory_mb = sys.maxsize
os_memory_mb = min(os_memory_mb, os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / 1048576)
# Depending on environment we take 1/4 or 1/5 of the memory, expressed in full MB's
sb_ratio = 5 if USE_KUBERNETES else 4
placeholders['postgresql']['parameters']['shared_buffers'] = '{}MB'.format(int(os_memory_mb/sb_ratio))
# # 1 connection per 30 MB, at least 100, at most 1000
placeholders['postgresql']['parameters']['max_connections'] = min(max(100, int(os_memory_mb/30)), 1000)
placeholders['instance_data'] = get_instance_metadata(provider)
placeholders['BGMON_LISTEN_IP'] = get_listen_ip()
if 'SSL_CA' in placeholders and placeholders['SSL_CA_FILE'] == '':
placeholders['SSL_CA_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'ca.crt')
if 'SSL_CRL' in placeholders and placeholders['SSL_CRL_FILE'] == '':
placeholders['SSL_CRL_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'server.crl')
ssl_keys = ['SSL_RESTAPI_CERTIFICATE', 'SSL_RESTAPI_PRIVATE_KEY']
if not set(ssl_keys) <= set(placeholders):
placeholders['SSL_RESTAPI_CERTIFICATE_FILE'] = ''
placeholders['SSL_RESTAPI_PRIVATE_KEY_FILE'] = ''
placeholders['SSL_RESTAPI_CA_FILE'] = ''
placeholders['SSL_RESTAPI_CA'] = ''
elif 'SSL_RESTAPI_CA' in placeholders and placeholders['SSL_RESTAPI_CA_FILE'] == '':
placeholders['SSL_RESTAPI_CA_FILE'] = os.path.join(placeholders['RW_DIR'], 'certs', 'rest-api-ca.crt')
return placeholders
|
31,927 |
def query_sql_get_next_command(client, args):
next_id = args.get('id')
timeout = int(args.get('timeout', '20'))
res = client.sql_next(next_id=next_id, timeout=timeout)
output = {
"total": res.get('total', {}).get('value'),
"records": res.get('records'),
"id": res.get('id'),
"query": res.get('query')
}
readable_output = tableToMarkdown("SQL Query Results:", output)
command_results = CommandResults(
outputs_prefix='Securitytrails.SQL',
outputs_key_field=['query', 'id'],
outputs=output,
readable_output=readable_output
)
return_results(command_results)
|
def query_sql_get_next_command(client, args):
next_id = str(args.get('id'))
timeout = int(args.get('timeout', '20'))
res = client.sql_next(next_id=next_id, timeout=timeout)
output = {
"total": res.get('total', {}).get('value'),
"records": res.get('records'),
"id": res.get('id'),
"query": res.get('query')
}
readable_output = tableToMarkdown("SQL Query Results:", output)
command_results = CommandResults(
outputs_prefix='Securitytrails.SQL',
outputs_key_field=['query', 'id'],
outputs=output,
readable_output=readable_output
)
return_results(command_results)
|
31,779 |
def ip_command():
ip = demisto.args()['ip']
res = http_request('GET', f'/shodan/host/{ip}')
if not res:
demisto.results('No information available for the given IP.')
else:
hostnames = res.get('hostnames')
# It's a list, only if it exists and not empty we take the first value.
hostname = hostnames[0] if hostnames else ''
location = f'{round(res.get("latitude", 0.0), 3)},{round(res.get("longitude", 0.0), 3)}'
ip_details = {
'ASN': res.get('asn', ''),
'Address': ip,
'Hostname': hostname,
'Geo': {
'Country': res.get('country_name', ''),
'Location': location
}
}
dbot_score = {
'Indicator': ip,
'Type': 'ip',
'Vendor': 'Shodan',
'Score': 0,
}
shodan_ip_details = {
'Tag': res.get('tags', []),
'Latitude': res.get('latitude', 0.0),
'Longitude': res.get('longitude', 0.0),
'Org': res.get('org', ''),
'ASN': res.get('asn', ''),
'ISP': res.get('isp', ''),
'LastUpdate': res.get('last_update', ''),
'CountryName': res.get('country_name', ''),
'Address': ip,
'OS': res.get('os', ''),
'Port': res.get('ports', [])
}
ec = {
outputPaths['ip']: ip_details,
'DBotScore': dbot_score,
'Shodan': {
'IP': shodan_ip_details
}
}
human_readable = tableToMarkdown(f'Shodan details for IP {ip}', {
'Country': ec[outputPaths['ip']]['Geo']['Country'],
'Location': ec[outputPaths['ip']]['Geo']['Location'],
'ASN': ec[outputPaths['ip']]['ASN'],
'ISP': ec['Shodan']['IP']['ISP'],
'Ports': ', '.join([str(x) for x in ec['Shodan']['IP']['Port']]),
'Hostname': ec[outputPaths['ip']]['Hostname']
})
demisto.results({
'Type': entryTypes['note'],
'Contents': res,
'ContentsFormat': formats['json'],
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
'EntryContext': ec
})
|
def ip_command():
ip = demisto.args()['ip']
res = http_request('GET', f'/shodan/host/{ip}')
if not res:
demisto.results('No information available for the given IP.')
else:
hostnames = res.get('hostnames')
# It's a list, only if it exists and not empty we take the first value.
hostname = hostnames[0] if hostnames else ''
location = f'{round(res.get("latitude", 0.0), 3)},{round(res.get("longitude", 0.0), 3)}'
ip_details = {
'ASN': res.get('asn', ''),
'Address': ip,
'Hostname': hostname,
'Geo': {
'Country': res.get('country_name', ''),
'Location': location
}
}
dbot_score = {
'Indicator': ip,
'Type': 'ip',
'Vendor': 'Shodan_v2',
'Score': 0,
}
shodan_ip_details = {
'Tag': res.get('tags', []),
'Latitude': res.get('latitude', 0.0),
'Longitude': res.get('longitude', 0.0),
'Org': res.get('org', ''),
'ASN': res.get('asn', ''),
'ISP': res.get('isp', ''),
'LastUpdate': res.get('last_update', ''),
'CountryName': res.get('country_name', ''),
'Address': ip,
'OS': res.get('os', ''),
'Port': res.get('ports', [])
}
ec = {
outputPaths['ip']: ip_details,
'DBotScore': dbot_score,
'Shodan': {
'IP': shodan_ip_details
}
}
human_readable = tableToMarkdown(f'Shodan details for IP {ip}', {
'Country': ec[outputPaths['ip']]['Geo']['Country'],
'Location': ec[outputPaths['ip']]['Geo']['Location'],
'ASN': ec[outputPaths['ip']]['ASN'],
'ISP': ec['Shodan']['IP']['ISP'],
'Ports': ', '.join([str(x) for x in ec['Shodan']['IP']['Port']]),
'Hostname': ec[outputPaths['ip']]['Hostname']
})
demisto.results({
'Type': entryTypes['note'],
'Contents': res,
'ContentsFormat': formats['json'],
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
'EntryContext': ec
})
|
51,780 |
def test_internal_config_cache():
config = spack.config.Configuration()
config.push_scope(spack.config.InternalConfigScope('internal', {
'config': {
'build_jobs': 10
}
}))
config.clear_caches()
assert config.get('config:build_jobs') == 10
|
def test_internal_config_cache():
config = spack.config.Configuration()
config.push_scope(spack.config.InternalConfigScope('internal', {
'config': {
'build_jobs': 10
}
}))
# An InternalConfigScope object is constructed from data that is already
# in memory, therefore it doesn't have any cache to clear. Here we ensure
# that calling the clear method is a no-op.
internal_scope.clear()
assert config.get('config:build_jobs') == 10
|
26,760 |
def fetch_and_cache(script_url: str, output_filename: str):
"""Fetch URL to local cache and returns path."""
cache_key = _gethash(script_url)
cache_dir = _user_cache_dir("redoc-doc")
cache_metadata_filepath = os.path.join(cache_dir, "cache-metadata.json")
cache_filepath = os.path.join(cache_dir, F"{cache_key}-{output_filename}")
# Create cache directory
os.makedirs(cache_dir, exist_ok=True)
# Load cache metadata
cache_metadata: Dict[str, str] = {}
if os.path.exists(cache_metadata_filepath):
try:
with open(cache_metadata_filepath, "r") as cache_file:
cache_metadata = json.load(cache_file)
except json.JSONDecodeError:
os.remove(cache_metadata_filepath)
etag = cache_metadata.get(cache_key)
# If we have a file and etag, check the fast path
if os.path.exists(cache_filepath) and etag:
res = requests.get(script_url, headers={"If-None-Match": etag})
if res.status_code == 304:
return cache_filepath
# Slow patch
res = requests.get(script_url)
res.raise_for_status()
with open(cache_filepath, "wb") as output_file:
output_file.write(res.content)
# Save cache metadata, if needed
etag = res.headers.get('etag', None)
if etag:
cache_metadata[cache_key] = etag
with open(cache_metadata_filepath, 'w') as cache_file:
json.dump(cache_metadata, cache_file)
return cache_filepath
|
def fetch_and_cache(script_url: str, output_filename: str):
"""Fetch URL to local cache and returns path."""
cache_key = _gethash(script_url)
cache_dir = _user_cache_dir("redoc-doc")
cache_metadata_filepath = os.path.join(cache_dir, "cache-metadata.json")
cache_filepath = os.path.join(cache_dir, f"{cache_key}-{output_filename}")
# Create cache directory
os.makedirs(cache_dir, exist_ok=True)
# Load cache metadata
cache_metadata: Dict[str, str] = {}
if os.path.exists(cache_metadata_filepath):
try:
with open(cache_metadata_filepath, "r") as cache_file:
cache_metadata = json.load(cache_file)
except json.JSONDecodeError:
os.remove(cache_metadata_filepath)
etag = cache_metadata.get(cache_key)
# If we have a file and etag, check the fast path
if os.path.exists(cache_filepath) and etag:
res = requests.get(script_url, headers={"If-None-Match": etag})
if res.status_code == 304:
return cache_filepath
# Slow patch
res = requests.get(script_url)
res.raise_for_status()
with open(cache_filepath, "wb") as output_file:
output_file.write(res.content)
# Save cache metadata, if needed
etag = res.headers.get('etag', None)
if etag:
cache_metadata[cache_key] = etag
with open(cache_metadata_filepath, 'w') as cache_file:
json.dump(cache_metadata, cache_file)
return cache_filepath
|
31,350 |
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'picus-trigger-update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
29,636 |
def address_of_memoryview(mv: memoryview) -> int:
"""
Get the pointer to the first byte of a memoryview's data.
If the memoryview is read-only, NumPy must be installed.
"""
# NOTE: this method relies on pointer arithmetic to figure out
# where each memoryview starts within the underlying buffer.
# There's no direct API to get the address of a memoryview,
# so we use a trick through ctypes and the buffer protocol:
# https://mattgwwalker.wordpress.com/2020/10/15/address-of-a-buffer-in-python/
one_byte_carr = ctypes.c_byte * 1
# ^ length and type don't matter, just use it to get the address of the first byte
try:
carr = one_byte_carr.from_buffer(mv)
except TypeError:
# `mv` is read-only. `from_buffer` requires the buffer to be writeable.
# See https://bugs.python.org/issue11427 for discussion.
# This typically comes from `deserialize_bytes`, where `mv.obj` is an
# immutable bytestring.
pass
else:
return ctypes.addressof(carr)
try:
import numpy as np
except ImportError:
raise ValueError(
f"Cannot get address of read-only memoryview {mv} since NumPy is not installed."
) from None
# NumPy doesn't mind read-only buffers. We could just use this method
# for all cases, but it's nice to use the pure-Python method for the common
# case of writeable buffers (created by TCP comms, for example).
return np.asarray(mv).__array_interface__["data"][0]
|
def address_of_memoryview(mv: memoryview) -> int:
"""
Get the pointer to the first byte of a memoryview's data.
If the memoryview is read-only, NumPy must be installed.
"""
# NOTE: this method relies on pointer arithmetic to figure out
# where each memoryview starts within the underlying buffer.
# There's no direct API to get the address of a memoryview,
# so we use a trick through ctypes and the buffer protocol:
# https://mattgwwalker.wordpress.com/2020/10/15/address-of-a-buffer-in-python/
one_byte_carr = ctypes.c_byte * 1
# ^ length and type don't matter, just use it to get the address of the first byte
try:
carr = one_byte_carr.from_buffer(mv)
except TypeError:
# `mv` is read-only. `from_buffer` requires the buffer to be writeable.
# See https://bugs.python.org/issue11427 for discussion.
# This typically comes from `deserialize_bytes`, where `mv.obj` is an
# immutable bytestring.
pass
else:
return ctypes.addressof(carr)
try:
import numpy as np
except ImportError:
raise ValueError(
f"Cannot get address of read-only memoryview {mv} since NumPy is not installed."
)
# NumPy doesn't mind read-only buffers. We could just use this method
# for all cases, but it's nice to use the pure-Python method for the common
# case of writeable buffers (created by TCP comms, for example).
return np.asarray(mv).__array_interface__["data"][0]
|
48,677 |
def get_new_command(command):
new_command = 'git' + str(command.script)[len(command.script_parts[0]):]
return new_command
|
def get_new_command(command):
return 'git' + str(command.script)[len(command.script_parts[0]):]
priority = 1300
|
2,038 |
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : int
The number of seeds to choose
x_squared_norms : ndarray of shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : RandomState instance
The generator used to initialize the centers.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Returns
-------
centers : array, shape (n_clusters, n_features)
The inital centers for k-means.
indices : list, length (n_clusters)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
Notes
-----
Selects initial cluster centers for k-mean clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly and track index of point
center_id = random_state.randint(n_samples)
indices = np.empty(n_clusters)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# XXX: numerical imprecision can result in a candidate_id out of range
np.clip(candidate_ids, None, closest_dist_sq.size - 1,
out=candidate_ids)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# update closest distances squared and potential for each candidate
np.minimum(closest_dist_sq, distance_to_candidates,
out=distance_to_candidates)
candidates_pot = distance_to_candidates.sum(axis=1)
# Decide which candidate is the best
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
indices = [int(x) for x in indices.tolist()]
return centers, indices
|
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : int
The number of seeds to choose
x_squared_norms : ndarray of shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : RandomState instance
The generator used to initialize the centers.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Returns
-------
centers : array, shape (n_clusters, n_features)
The inital centers for k-means.
indices : list, length (n_clusters)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
Notes
-----
Selects initial cluster centers for k-mean clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly and track index of point
center_id = random_state.randint(n_samples)
indices = np.empty(n_clusters, dtype=int)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# XXX: numerical imprecision can result in a candidate_id out of range
np.clip(candidate_ids, None, closest_dist_sq.size - 1,
out=candidate_ids)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# update closest distances squared and potential for each candidate
np.minimum(closest_dist_sq, distance_to_candidates,
out=distance_to_candidates)
candidates_pot = distance_to_candidates.sum(axis=1)
# Decide which candidate is the best
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
indices = [int(x) for x in indices.tolist()]
return centers, indices
|
36,324 |
def _run_pf_algorithm(ppci, options, **kwargs):
algorithm = options["algorithm"]
ac = options["ac"]
if ac:
ref, pv, pq = bustypes(ppci["bus"], ppci["gen"])
# ----- run the powerflow -----
if pq.shape[0] == 0 and pv.shape[0] == 0:
result = _bypass_pf_and_set_results(ppci, options)
elif algorithm == 'bfsw': # forward/backward sweep power flow algorithm
result = _run_bfswpf(ppci, options, **kwargs)[0]
elif algorithm in ['nr', 'iwamoto_nr']:
result = _run_newton_raphson_pf(ppci, options)
elif algorithm in ['fdbx', 'fdxb', 'gs']: # algorithms existing within pypower
result = _runpf_pypower(ppci, options, **kwargs)[0]
else:
raise AlgorithmUnknown("Algorithm {0} is unknown!".format(algorithm))
else:
result = _run_dc_pf(ppci)
return result
|
def _run_pf_algorithm(ppci, options, **kwargs):
algorithm = options["algorithm"]
ac = options["ac"]
if ac:
_, pv, pq = bustypes(ppci["bus"], ppci["gen"])
# ----- run the powerflow -----
if pq.shape[0] == 0 and pv.shape[0] == 0:
result = _bypass_pf_and_set_results(ppci, options)
elif algorithm == 'bfsw': # forward/backward sweep power flow algorithm
result = _run_bfswpf(ppci, options, **kwargs)[0]
elif algorithm in ['nr', 'iwamoto_nr']:
result = _run_newton_raphson_pf(ppci, options)
elif algorithm in ['fdbx', 'fdxb', 'gs']: # algorithms existing within pypower
result = _runpf_pypower(ppci, options, **kwargs)[0]
else:
raise AlgorithmUnknown("Algorithm {0} is unknown!".format(algorithm))
else:
result = _run_dc_pf(ppci)
return result
|
49,111 |
def test_issue_22768():
assert solve(2*x**3 - 16*(y - 1)**6*z**3, x, simplify=False
) == [2*z*(y - 1)**2, z*(-1 + sqrt(3)*I)*(y - 1)**2,
-z*(1 + sqrt(3)*I)*(y - 1)**2]
|
def test_issue_22768():
eq = 2*x**3 - 16*(y - 1)**6*z**3
assert solve(eq.expand(), x, simplify=False
) == [2*z*(y - 1)**2, z*(-1 + sqrt(3)*I)*(y - 1)**2,
-z*(1 + sqrt(3)*I)*(y - 1)**2]
|
40,102 |
def get_query(request_parameter: ImmutableMultiDict) -> dict:
'''
Parse the query parameter from request parameters. Query is a dictionary representing a MongoDB query.
:param request_parameters: dict containing the request parameters.
:return: The MongoDB query as dict.
'''
try:
query = request_parameter.get('query')
query = json.loads(query if query else '{}')
except (AttributeError, KeyError):
return dict()
except json.JSONDecodeError:
raise ValueError('Query must be a json document')
if not isinstance(query, dict):
raise ValueError('Query must be a json document')
return query if query else dict()
|
def get_query(request_parameter: ImmutableMultiDict) -> dict:
'''
Parse the query parameter from request parameters. Query is a dictionary representing a MongoDB query.
:param request_parameter: dict containing the request parameters.
:return: The MongoDB query as dict.
'''
try:
query = request_parameter.get('query')
query = json.loads(query if query else '{}')
except (AttributeError, KeyError):
return dict()
except json.JSONDecodeError:
raise ValueError('Query must be a json document')
if not isinstance(query, dict):
raise ValueError('Query must be a json document')
return query if query else dict()
|
52,695 |
def nyquist_plot(syslist, omega=None, plot=True, omega_limits=None,
omega_num=None, label_freq=0, arrowhead_length=0.1,
arrowhead_width=0.1, color=None, *args, **kwargs):
"""
Nyquist plot for a system
Plots a Nyquist plot for the system over a (optional) frequency range.
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK)
plot : boolean
If True, plot magnitude
omega : array_like
Range of frequencies in rad/sec
omega_limits : array_like of two values
Limits of the to generate frequency vector.
omega_num : int
Number of samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
color : string
Used to specify the color of the line and arrowhead
label_freq : int
Label every nth frequency on the plot
arrowhead_width : float
Arrow head width
arrowhead_length : float
Arrow head length
*args : :func:`matplotlib.pyplot.plot` positional properties, optional
Additional arguments for `matplotlib` plots (color, linestyle, etc)
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
real : ndarray (or list of ndarray if len(syslist) > 1))
real part of the frequency response array
imag : ndarray (or list of ndarray if len(syslist) > 1))
imaginary part of the frequency response array
omega : ndarray (or list of ndarray if len(syslist) > 1))
frequencies in rad/s
Examples
--------
>>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
>>> real, imag, freq = nyquist_plot(sys)
"""
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
import warnings
warnings.warn("'Plot' keyword is deprecated in nyquist_plot; "
"use 'plot'", FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Check to see if legacy 'labelFreq' keyword was used
if 'labelFreq' in kwargs:
import warnings
warnings.warn("'labelFreq' keyword is deprecated in nyquist_plot; "
"use 'label_freq'", FutureWarning)
# Map 'labelFreq' keyword to 'label_freq' keyword
label_freq = kwargs.pop('labelFreq')
# If argument was a singleton, turn it into a list
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
# Select a default range if none is provided
if omega is None:
if omega_limits is None:
# Select a default range if none is provided
omega = default_frequency_range(syslist, Hz=False,
number_of_samples=omega_num)
else:
omega_limits = np.asarray(omega_limits)
if len(omega_limits) != 2:
raise ValueError("len(omega_limits) must be 2")
if omega_num:
num = omega_num
else:
num = config.defaults['freqplot.number_of_samples']
omega = np.logspace(np.log10(omega_limits[0]),
np.log10(omega_limits[1]), num=num,
endpoint=True)
xs, ys, omegas = [], [], []
for sys in syslist:
mag, phase, omega = sys.frequency_response(omega)
# Compute the primary curve
x = mag * np.cos(phase)
y = mag * np.sin(phase)
xs.append(x)
ys.append(y)
omegas.append(omega)
if plot:
if not sys.issiso():
# TODO: Add MIMO nyquist plots.
raise ControlMIMONotImplemented(
"Nyquist plot currently supports SISO systems.")
# Plot the primary curve and mirror image
p = plt.plot(x, y, '-', color=color, *args, **kwargs)
c = p[0].get_color()
ax = plt.gca()
# Plot arrow to indicate Nyquist encirclement orientation
ax.arrow(x[0], y[0], (x[1]-x[0])/2, (y[1]-y[0])/2, fc=c, ec=c,
head_width=arrowhead_width,
head_length=arrowhead_length)
plt.plot(x, -y, '-', color=c, *args, **kwargs)
ax.arrow(
x[-1], -y[-1], (x[-1]-x[-2])/2, (y[-1]-y[-2])/2,
fc=c, ec=c, head_width=arrowhead_width,
head_length=arrowhead_length)
# Mark the -1 point
plt.plot([-1], [0], 'r+')
# Label the frequencies of the points
if label_freq:
ind = slice(None, None, label_freq)
for xpt, ypt, omegapt in zip(x[ind], y[ind], omega[ind]):
# Convert to Hz
f = omegapt / (2 * np.pi)
# Factor out multiples of 1000 and limit the
# result to the range [-8, 8].
pow1000 = max(min(get_pow1000(f), 8), -8)
# Get the SI prefix.
prefix = gen_prefix(pow1000)
# Apply the text. (Use a space before the text to
# prevent overlap with the data.)
#
# np.round() is used because 0.99... appears
# instead of 1.0, and this would otherwise be
# truncated to 0.
plt.text(xpt, ypt, ' ' +
str(int(np.round(f / 1000 ** pow1000, 0))) + ' ' +
prefix + 'Hz')
if plot:
ax = plt.gca()
ax.set_xlabel("Real axis")
ax.set_ylabel("Imaginary axis")
ax.grid(color="lightgray")
if len(syslist) == 1:
return xs[0], ys[0], omegas[0]
else:
return xs, ys, omegas
|
def nyquist_plot(syslist, omega=None, plot=True, omega_limits=None,
omega_num=None, label_freq=0, arrowhead_length=0.1,
arrowhead_width=0.1, color=None, *args, **kwargs):
"""
Nyquist plot for a system
Plots a Nyquist plot for the system over a (optional) frequency range.
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK)
plot : boolean
If True, plot magnitude
omega : array_like
Range of frequencies in rad/sec
omega_limits : array_like of two values
Limits to the range of frequencies. Ignored if omega
is provided, and auto-generated if omitted.
omega_num : int
Number of samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
color : string
Used to specify the color of the line and arrowhead
label_freq : int
Label every nth frequency on the plot
arrowhead_width : float
Arrow head width
arrowhead_length : float
Arrow head length
*args : :func:`matplotlib.pyplot.plot` positional properties, optional
Additional arguments for `matplotlib` plots (color, linestyle, etc)
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
real : ndarray (or list of ndarray if len(syslist) > 1))
real part of the frequency response array
imag : ndarray (or list of ndarray if len(syslist) > 1))
imaginary part of the frequency response array
omega : ndarray (or list of ndarray if len(syslist) > 1))
frequencies in rad/s
Examples
--------
>>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
>>> real, imag, freq = nyquist_plot(sys)
"""
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
import warnings
warnings.warn("'Plot' keyword is deprecated in nyquist_plot; "
"use 'plot'", FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Check to see if legacy 'labelFreq' keyword was used
if 'labelFreq' in kwargs:
import warnings
warnings.warn("'labelFreq' keyword is deprecated in nyquist_plot; "
"use 'label_freq'", FutureWarning)
# Map 'labelFreq' keyword to 'label_freq' keyword
label_freq = kwargs.pop('labelFreq')
# If argument was a singleton, turn it into a list
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
# Select a default range if none is provided
if omega is None:
if omega_limits is None:
# Select a default range if none is provided
omega = default_frequency_range(syslist, Hz=False,
number_of_samples=omega_num)
else:
omega_limits = np.asarray(omega_limits)
if len(omega_limits) != 2:
raise ValueError("len(omega_limits) must be 2")
if omega_num:
num = omega_num
else:
num = config.defaults['freqplot.number_of_samples']
omega = np.logspace(np.log10(omega_limits[0]),
np.log10(omega_limits[1]), num=num,
endpoint=True)
xs, ys, omegas = [], [], []
for sys in syslist:
mag, phase, omega = sys.frequency_response(omega)
# Compute the primary curve
x = mag * np.cos(phase)
y = mag * np.sin(phase)
xs.append(x)
ys.append(y)
omegas.append(omega)
if plot:
if not sys.issiso():
# TODO: Add MIMO nyquist plots.
raise ControlMIMONotImplemented(
"Nyquist plot currently supports SISO systems.")
# Plot the primary curve and mirror image
p = plt.plot(x, y, '-', color=color, *args, **kwargs)
c = p[0].get_color()
ax = plt.gca()
# Plot arrow to indicate Nyquist encirclement orientation
ax.arrow(x[0], y[0], (x[1]-x[0])/2, (y[1]-y[0])/2, fc=c, ec=c,
head_width=arrowhead_width,
head_length=arrowhead_length)
plt.plot(x, -y, '-', color=c, *args, **kwargs)
ax.arrow(
x[-1], -y[-1], (x[-1]-x[-2])/2, (y[-1]-y[-2])/2,
fc=c, ec=c, head_width=arrowhead_width,
head_length=arrowhead_length)
# Mark the -1 point
plt.plot([-1], [0], 'r+')
# Label the frequencies of the points
if label_freq:
ind = slice(None, None, label_freq)
for xpt, ypt, omegapt in zip(x[ind], y[ind], omega[ind]):
# Convert to Hz
f = omegapt / (2 * np.pi)
# Factor out multiples of 1000 and limit the
# result to the range [-8, 8].
pow1000 = max(min(get_pow1000(f), 8), -8)
# Get the SI prefix.
prefix = gen_prefix(pow1000)
# Apply the text. (Use a space before the text to
# prevent overlap with the data.)
#
# np.round() is used because 0.99... appears
# instead of 1.0, and this would otherwise be
# truncated to 0.
plt.text(xpt, ypt, ' ' +
str(int(np.round(f / 1000 ** pow1000, 0))) + ' ' +
prefix + 'Hz')
if plot:
ax = plt.gca()
ax.set_xlabel("Real axis")
ax.set_ylabel("Imaginary axis")
ax.grid(color="lightgray")
if len(syslist) == 1:
return xs[0], ys[0], omegas[0]
else:
return xs, ys, omegas
|
7,918 |
def test_cell_translation(pincell_model_w_univ, mpi_intracomm):
openmc.lib.finalize()
openmc.lib.init(intracomm=mpi_intracomm)
openmc.lib.simulation_init()
# Cell 1 is filled with a material so it has a translation, but we can't
# set it.
cell = openmc.lib.cells[1]
assert cell.get_translation() == pytest.approx([0., 0., 0.])
with pytest.raises(exc.GeometryError, match='not filled with'):
cell.set_translation(np.array([1., 0., -1.]))
# Cell 2 was given a universe, so we can assign it a translation vector
cell = openmc.lib.cells[2]
assert cell.get_translation() == pytest.approx([0., 0., 0.])
# This time we *can* set it
cell.set_translation(np.array([1., 0., -1.]))
assert cell.get_translation() == pytest.approx([1., 0., -1.])
|
def test_cell_translation(pincell_model_w_univ, mpi_intracomm):
openmc.lib.finalize()
openmc.lib.init(intracomm=mpi_intracomm)
openmc.lib.simulation_init()
# Cell 1 is filled with a material so it has a translation, but we can't
# set it.
cell = openmc.lib.cells[1]
assert cell.get_translation() == pytest.approx([0., 0., 0.])
with pytest.raises(exc.GeometryError, match='not filled with'):
cell.set_translation(np.array([1., 0., -1.]))
# Cell 2 was given a universe, so we can assign it a translation vector
cell = openmc.lib.cells[2]
assert cell.get_translation() == pytest.approx([0., 0., 0.])
# This time we *can* set it
cell.set_translation((1., 0., -1.))
assert cell.get_translation() == pytest.approx([1., 0., -1.])
|
4,307 |
def read_raw_nedf(filename):
"""
Read NeuroElectrics .nedf files.
NEDF file versions starting from 1.3 are supported.
Parameters
----------
filename : str
Path to the .nedf file
Returns
-------
raw : instance of RawNedf
A Raw object containing NEDF data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawNedf(filename)
|
def read_raw_nedf(filename):
"""
Read NeuroElectrics .nedf files.
NEDF file versions starting from 1.3 are supported.
Parameters
----------
filename : str
Path to the .nedf file.
Returns
-------
raw : instance of RawNedf
A Raw object containing NEDF data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawNedf(filename)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.