# File: zhinst/qcodes/driver/modules/device_settings_module.py
import typing as t
from typing import Union
from pathlib import Path
from zhinst.toolkit.driver.modules.device_settings_module import (
DeviceSettingsModule as TKDeviceSettingsModule,
)
from zhinst.qcodes.driver.modules.base_module import ZIBaseModule
from zhinst.qcodes.qcodes_adaptions import (
NodeDict,
)
if t.TYPE_CHECKING:
from zhinst.qcodes.driver.devices import DeviceType
from zhinst.qcodes.session import Session
class ZIDeviceSettingsModule(ZIBaseModule):
"""Implements the device settings module for storing and loading settings.
The Device Settings Module provides functionality for saving and loading
device settings to and from file. The file is saved in XML format.
For simple save and load operations two helper functions exist,
`save_to_file` and `load_from_file`.
Note: It is not recommended to use this module to read the
device settings. Instead, one can use the zhinst-toolkit functionality
to read all settings from a device or subtree directly by
calling it.
For a complete documentation see the LabOne user manual
https://docs.zhinst.com/labone_programming_manual/device_settings_module.html
Args:
tk_object: Underlying zhinst-toolkit object.
session: Session to the Data Server.
name: Name of the module in QCoDeS.
"""
def __init__(
self,
tk_object: TKDeviceSettingsModule,
session: "Session",
name: str = "device_settings_module",
):
super().__init__(tk_object, session, name)
def load_from_file(
self,
filename: Union[str, Path],
device: Union["DeviceType", str],
timeout: float = 30,
) -> None:
"""Load a LabOne settings file to a device.
This function creates a new module instance to avoid misconfiguration.
It is also synchronous, meaning it will block until loading the
settings has finished.
Args:
filename: The path to the settings file.
device: The device to load the settings to.
timeout: Max time to wait for the loading to finish.
Raises:
TimeoutError: If the loading of the settings timed out.
"""
return self._tk_object.load_from_file(
filename=filename, device=device, timeout=timeout
)
def save_to_file(
self,
filename: Union[str, Path],
device: Union["DeviceType", str],
timeout: float = 30,
) -> None:
"""Save the device settings to a LabOne settings file.
This function creates a new module instance to avoid misconfiguration.
It is also synchronous, meaning it will block until the save operation
has finished.
Args:
filename: The path to the settings file.
device: The device which settings should be saved.
timeout: Max time to wait for the saving to finish.
Raises:
TimeoutError: If saving the settings timed out.
"""
return self._tk_object.save_to_file(
filename=filename, device=device, timeout=timeout
)
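# Example (sketch): saving and restoring a device configuration through this
# module. The serial "dev1234" and the `session.modules.device_settings`
# accessor are assumptions for illustration; adapt them to the actual setup.
#
#   settings = session.modules.device_settings
#   device = session.devices["dev1234"]
#   settings.save_to_file("my_settings.xml", device)
#   settings.load_from_file("my_settings.xml", device)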
def read(self) -> NodeDict:
"""Read device settings.
Note: It is not recommended to use this function to read the
device settings. Instead, one can use the zhinst-toolkit functionality
to read all settings from a device or subtree directly by
calling it.
>>> device = session.connect_device()
>>> ...
>>> device()
<all device settings>
>>> device.demods()
<all demodulator settings>
Returns:
Device settings.
"""
return NodeDict(self._tk_object.read())

# File: zhinst/qcodes/driver/modules/daq_module.py
import typing as t
from zhinst.toolkit.driver.modules.daq_module import DAQModule as TKDAQModule
from zhinst.qcodes.driver.modules.base_module import ZIBaseModule
from zhinst.qcodes.qcodes_adaptions import (
NodeDict,
)
if t.TYPE_CHECKING:
from zhinst.qcodes.session import Session
class ZIDAQModule(ZIBaseModule):
"""Data Acquisition Module.
The Data Acquisition Module corresponds to the Data Acquisition tab of the
LabOne User Interface. It enables the user to record and align time and
frequency domain data from multiple instrument signal sources at a defined
data rate. The data may be recorded either continuously or in bursts based
upon trigger criteria analogous to the functionality provided by laboratory
oscilloscopes.
For a complete documentation see the LabOne user manual
https://docs.zhinst.com/labone_programming_manual/data_acquisition_module.html
Args:
tk_object: Underlying zhinst-toolkit object.
session: Session to the Data Server.
name: Name of the module in QCoDeS.
"""
def __init__(
self, tk_object: TKDAQModule, session: "Session", name: str = "daq_module"
):
super().__init__(tk_object, session, name)
self._tk_object.root.update_nodes(
{
"/triggernode": {
"GetParser": lambda value: self._get_node(value),
"SetParser": lambda value: self._set_node(value),
}
},
raise_for_invalid_node=False,
)
def finish(self) -> None:
"""Stop the module.
.. versionadded:: 0.5.0
"""
return self._tk_object.finish()
def finished(self) -> bool:
"""Check if the acquisition has finished.
Returns:
Flag if the acquisition has finished.
.. versionadded:: 0.5.0
"""
return self._tk_object.finished()
def trigger(self) -> None:
"""Execute a manual trigger.
.. versionadded:: 0.5.0
"""
return self._tk_object.trigger()
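# Example (sketch): a minimal continuous acquisition with this module. The
# device serial, node path, and the `session.modules.daq` accessor are
# assumptions for illustration only.
#
#   daq = session.modules.daq
#   daq.device("dev1234")
#   daq.type(0)  # 0 = continuous acquisition (no trigger)
#   daq.subscribe("/dev1234/demods/0/sample.x")
#   daq.execute()
#   data = daq.read()  # bursts grouped by signal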
def read(self, *, raw: bool = False, clk_rate: float = 60000000.0) -> NodeDict:
"""Read the acquired data from the module.
The data is split into bursts.
Args:
raw: Flag if the acquired data from the subscribed device
nodes should be converted into the DAQResult format
(raw = False) or not. (default = False)
clk_rate: Clock rate [Hz] for converting the timestamps. Only
applies if the raw flag is reset.
Returns:
Result of the burst grouped by the signals.
"""
return NodeDict(self._tk_object.read(raw=raw, clk_rate=clk_rate))

# File: zhinst/qcodes/driver/modules/base_module.py
import typing as t
from zhinst.toolkit.driver.modules import ModuleType as TKModuleType
from zhinst.toolkit.driver.modules.base_module import BaseModule as TKBaseModule
from zhinst.toolkit.driver.modules.base_module import ZIModule
from zhinst.toolkit.nodetree import Node as TKNode
from zhinst.qcodes.qcodes_adaptions import (
ZIParameter,
NodeDict,
ZIInstrument,
init_nodetree,
tk_node_to_parameter,
)
if t.TYPE_CHECKING:
from zhinst.qcodes.driver.devices import DeviceType
from zhinst.qcodes.session import Session
class ZIBaseModule(ZIInstrument):
"""Generic toolkit driver for a LabOne Modules.
All module specific class are derived from this class.
It exposes the nodetree and also implements common functions valid for all
modules.
It also can be used directly, e.g. for modules that have no special class
in toolkit.
Args:
tk_object: Underlying zhinst-toolkit object.
session: Session to the Data Server.
name: Name of the module in QCoDeS.
"""
def __init__(self, tk_object: TKModuleType, session: "Session", name: str):
self._tk_object = tk_object
self._session = session
super().__init__(
f"zi_{name}_{len(self.instances())}", tk_object.root, is_module=True
)
init_nodetree(self, self._tk_object, self._snapshot_cache)
self._tk_object.root.update_nodes(
{
"/device": {
"GetParser": lambda value: self._get_device(value),
}
},
raise_for_invalid_node=False,
)
def _get_device(self, serial: str) -> t.Union["DeviceType", str]:
"""Convert a device serial into a QCoDeS device object.
Args:
serial: Serial of the device
Returns:
QCoDeS device object. If the serial does not
match a connected device, the serial is returned instead.
"""
try:
return self._session.devices[serial]
except (RuntimeError, KeyError):
return serial
def _get_node(self, node: str) -> t.Union[ZIParameter, str]:
"""Convert a raw node string into a qcodes node.
Args:
node (str): raw node string
Returns:
Node: qcodes node. (If the node cannot be converted, the raw node
string is returned.)
"""
tk_node = self._tk_object._get_node(node)
if isinstance(tk_node, str):
return tk_node
device = self._session.devices[tk_node.root.prefix_hide]
return tk_node_to_parameter(device, tk_node)
@staticmethod
def _set_node(signal: t.Union[ZIParameter, TKNode, str]) -> str:
"""Convert a toolkit node into a raw node string.
Args:
signal (Union[Node,str]): node
Returns:
str: raw string node
"""
try:
node = signal.zi_node # type: ignore[union-attr]
except AttributeError:
node = TKBaseModule._set_node(signal)
return node
def subscribe(self, signal: t.Union[ZIParameter, str]):
"""Subscribe to a node.
The node can either be a node of this module or of a connected device.
Args:
signal (Node): node that should be subscribed.
"""
try:
self._tk_object.subscribe(signal.zi_node) # type: ignore[union-attr]
except AttributeError:
self._tk_object.subscribe(signal)
def unsubscribe(self, signal: t.Union[ZIParameter, str]):
"""Unsubscribe from a node.
The node can either be a node of this module or of a connected device.
Args:
signal (Node): node that should be unsubscribed.
"""
try:
self._tk_object.unsubscribe(signal.zi_node) # type: ignore[union-attr]
except AttributeError:
self._tk_object.unsubscribe(signal)
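# Example (sketch): subscribing with either a QCoDeS parameter or a raw node
# string, matching the two branches above. `module` and `device` are
# hypothetical instances.
#
#   module.subscribe(device.demods[0].sample)     # ZIParameter
#   module.subscribe("/dev1234/demods/0/sample")  # raw node string
#   module.unsubscribe(device.demods[0].sample)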
@property
def raw_module(self) -> ZIModule: # type: ignore [type-var]
"""Underlying zhinst.core module."""
return self._tk_object.raw_module
def wait_done(self, *, timeout: float = 20.0, sleep_time: float = 0.5) -> None:
"""Waits until the module is finished.
Warning: Only usable for modules that make use of the `/finished` node.
Args:
timeout (float): The maximum waiting time in seconds for the
measurement (default: 20).
sleep_time (float): Time in seconds to wait between
requesting sweeper state. (default: 0.5)
Raises:
TimeoutError: The measurement is not completed before
timeout.
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def progress(self) -> float:
"""Progress of the execution.
Returns:
Progress of the execution with a number between 0 and 1
"""
return self._tk_object.progress()
def execute(self) -> None:
"""Start the module execution.
Subscription or unsubscription is not possible until the execution is
finished.
.. versionadded:: 0.4.1
"""
return self._tk_object.execute()
def read(self) -> NodeDict:
"""Read scope data.
If the recording is still ongoing only a subset of data is returned.
Returns:
Scope data.
"""
return NodeDict(self._tk_object.read())

# File: zhinst/qcodes/driver/modules/shfqa_sweeper.py
import typing as t
from zhinst.toolkit.driver.modules.shfqa_sweeper import SHFQASweeper as TKSHFQASweeper
from zhinst.qcodes.qcodes_adaptions import init_nodetree, ZIInstrument
if t.TYPE_CHECKING:
from zhinst.qcodes.driver.devices import DeviceType
from zhinst.qcodes.session import Session
class ZISHFQASweeper(ZIInstrument):
"""QCoDeS adaption for the zhinst.utils.SHFSweeper.
For now the general sweeper module does not support the SHFQA. However,
a Python-based implementation called ``SHFSweeper`` already provides
this functionality. The ``SHFSweeper`` is part of the ``zhinst`` module
and can be found in the utils.
Toolkit wraps around the ``SHFSweeper`` and exposes an interface that is
similar to the LabOne modules, meaning the parameters are exposed in a
node tree like structure.
All parameters can be accessed through their corresponding node:
* device: Device to run the sweeper with
* sweep: Frequency range settings for a sweep
* rf: RF in- and output settings for a sweep
* average: Averaging settings for a sweep
* trigger: Settings for the trigger
* envelope: Settings for defining a complex envelope for pulsed spectroscopy
The underlying module is updated with the parameter changes automatically.
Every function of the underlying SHFSweeper module is exposed and can be
used in the same way.
Args:
tk_object: Instance of the toolkit shfqa sweeper.
session: Session to the Data Server.
"""
def __init__(self, tk_object: TKSHFQASweeper, session: "Session"):
super().__init__(
f"zi_shfqasweeper_{len(self.instances())}", tk_object.root, is_module=True
)
self._tk_object = tk_object
self._session = session
init_nodetree(self, self._tk_object, self._snapshot_cache)
self._tk_object.root.update_nodes(
{"/device": {"GetParser": lambda value: self._get_device(value)}}
)
def _get_device(self, serial: str) -> t.Union["DeviceType", str]:
"""Convert a device serial into a QCoDeS device object.
Args:
serial: Serial of the device
Returns:
QCoDeS device object. If the serial does not
match a connected device, the serial is returned instead.
"""
try:
return self._session.devices[serial]
except (RuntimeError, KeyError):
return serial
def run(self) -> dict:
"""Perform a sweep with the specified settings.
This method eventually wraps around the `run` method of
`zhinst.utils.shf_sweeper`
Returns:
A dictionary with measurement data of the last sweep.
"""
return self._tk_object.run()
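# Example (sketch): configuring and running a sweep through the node groups
# listed in the class docstring. The serial is made up, and the accessor for
# obtaining `sweeper` from the session is an assumption.
#
#   sweeper = session.modules.shfqa_sweeper  # accessor name is an assumption
#   sweeper.device("dev12000")
#   sweeper.sweep.start_freq(-200e6)
#   sweeper.sweep.stop_freq(200e6)
#   sweeper.sweep.num_points(101)
#   result = sweeper.run()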
def get_result(self) -> dict:
"""Get the measurement data of the last sweep.
This method eventually wraps around the `get_result` method of
`zhinst.utils.shf_sweeper`
Returns:
A dictionary with measurement data of the last sweep.
"""
return self._tk_object.get_result()
def plot(self) -> None:
"""Plot power over frequency for last sweep.
This method eventually wraps around the `plot` method of
`zhinst.utils.shf_sweeper`
"""
return self._tk_object.plot()
def get_offset_freq_vector(self) -> t.Any:
"""Get vector of frequency points.
This method wraps around the `get_offset_freq_vector` method of
`zhinst.utils.shf_sweeper`
Returns:
Vector of frequency points.
"""
return self._tk_object.get_offset_freq_vector()

# File: zhinst/qcodes/driver/devices/shfqa.py
from typing import Any, Dict, List, Tuple, Union
import numpy as np
from zhinst.toolkit import Sequence, Waveforms
from zhinst.toolkit.interface import AveragingMode, SHFQAChannelMode
from zhinst.utils.shfqa.multistate import QuditSettings
from zhinst.qcodes.driver.devices.base import ZIBaseInstrument
from zhinst.qcodes.qcodes_adaptions import ZINode, ZIChannelList
class Generator(ZINode):
"""Generator node.
Implements basic functionality of the generator allowing the user to write
and upload their *'.seqC'* code.
In contrast to other AWG Sequencers, e.g. from the HDAWG or SHFSG,
it does not provide writing access to the Waveform Memories
and hence does not come with predefined waveforms such as `gauss`
or `ones`. Therefore, all waveforms need to be defined in Python
and uploaded to the device using the `write_to_waveform_memory` method.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
daq_server: Instance of the ziDAQServer
serial: Serial of the device.
index: Index of the corresponding awg channel
max_qubits_per_channel: Max qubits per channel
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "generator", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
def enable_sequencer(self, *, single: bool) -> None:
"""Starts the sequencer of a specific channel.
Warning:
This function is synchronous and blocks until the sequencer is enabled.
When working with multiple instruments this function is the wrong
approach and the sequencer should be enabled asynchronously.
(For more information please take a look at the awg example in the
toolkit documentation.)
Args:
single: Flag if the sequencer should be disabled after finishing
execution.
Raises:
RuntimeError: If the sequencer could not be enabled.
.. versionchanged:: 0.5.0
Check the acknowledged value instead of using `wait_for_state_change`.
"""
return self._tk_object.enable_sequencer(single=single)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the AWG is finished.
Args:
timeout: The maximum waiting time in seconds for the generator
(default: 10).
sleep_time: Time in seconds to wait between requesting generator
state
Raises:
RuntimeError: If continuous mode is enabled
TimeoutError: If the sequencer program did not finish within
the specified timeout time
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def compile_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Tuple[bytes, Dict[str, Any]]:
"""Compiles a sequencer program for the specific device.
Args:
sequencer_program: The sequencer program to compile.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
the device. It is recommended passing the samplerate if more
than one sequencer code is uploaded in a row to speed up the
execution time.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Returns:
elf: Binary ELF data for sequencer.
extra: Extra dictionary with compiler output.
Examples:
>>> elf, compile_info = device.awgs[0].compile_sequencer_program(seqc)
>>> device.awgs[0].elf.data(elf)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the compilation failed.
.. versionadded:: 0.4.0
"""
return self._tk_object.compile_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def load_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Dict[str, Any]:
"""Compiles the given sequencer program on the AWG Core.
Warning:
After uploading the sequencer program one needs to wait before for
the awg core to become ready before it can be enabled.
The awg core indicates the ready state through its `ready` node.
(device.awgs[0].ready() == True)
Args:
sequencer_program: Sequencer program to be uploaded.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
the device. It is recommended passing the samplerate if more
than one sequencer code is uploaded in a row to speed up the
execution time.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Examples:
>>> compile_info = device.awgs[0].load_sequencer_program(seqc)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the upload or compilation failed.
.. versionadded:: 0.3.4
`sequencer_program` does not accept empty strings
.. versionadded:: 0.4.0
Use offline compiler instead of AWG module to compile the sequencer
program. This speeds up the compilation and also enables parallel
compilation/upload.
"""
return self._tk_object.load_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def write_to_waveform_memory(
self, pulses: Union[Waveforms, dict], *, clear_existing: bool = True
) -> None:
"""Writes pulses to the waveform memory.
Args:
pulses: Waveforms that should be uploaded.
clear_existing: Flag whether to clear the waveform memory before the
present upload. (default = True)
"""
return self._tk_object.write_to_waveform_memory(
pulses=pulses, clear_existing=clear_existing
)
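# Example (sketch): defining a readout pulse in Python and uploading it,
# since the generator has no predefined waveforms. The Gaussian shape, the
# amplitude, and slot 0 are arbitrary choices for illustration; `generator`
# stands for a hypothetical `device.qachannels[0].generator`.
#
#   import numpy as np
#   from zhinst.toolkit import Waveforms
#   pulses = Waveforms()
#   pulses[0] = 0.5 * np.exp(-0.5 * np.linspace(-3, 3, 1024) ** 2)
#   generator.write_to_waveform_memory(pulses)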
def read_from_waveform_memory(self, slots: List[int] = None) -> Waveforms:
"""Read pulses from the waveform memory.
Args:
slots: List of waveform indexes to read from the device. If not
specified all assigned waveforms will be downloaded.
Returns:
Mutable mapping of the downloaded waveforms.
"""
return self._tk_object.read_from_waveform_memory(slots=slots)
def configure_sequencer_triggering(
self, *, aux_trigger: str, play_pulse_delay: float = 0.0
) -> None:
"""Configure the sequencer triggering.
Args:
aux_trigger: Alias for the trigger source used in the sequencer.
For the list of available values, use `available_aux_trigger_inputs`
play_pulse_delay: Delay in seconds before the start of waveform playback.
"""
return self._tk_object.configure_sequencer_triggering(
aux_trigger=aux_trigger, play_pulse_delay=play_pulse_delay
)
@property
def available_aux_trigger_inputs(self) -> List:
"""List of available aux trigger sources for the generator."""
return self._tk_object.available_aux_trigger_inputs
class Qudit(ZINode):
"""Single Qudit node.
Implements basic functionality of a single qudit node, e.g applying the
basic configuration.
Args:
root: Root of the nodetree.
tree: Tree (node path as tuple) of the current node.
serial: Serial of the device.
readout_channel: Index of the readout channel this qudit belongs to.
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self,
parent,
f"qudit_{index}",
snapshot_cache=snapshot_cache,
zi_node=zi_node,
)
self._tk_object = tk_object
def configure(self, qudit_settings: QuditSettings, enable: bool = True) -> None:
"""Compiles a list of transactions to apply the qudit settings to the device.
Args:
qudit_settings: The qudit settings to be configured.
enable: Whether to enable the qudit. (default: True)
"""
return self._tk_object.configure(qudit_settings=qudit_settings, enable=enable)
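# Example (sketch): building QuditSettings from measured reference traces and
# applying them to the first qudit. `trace_g` and `trace_e` stand for complex
# readout traces of the ground and excited state; how they are measured is
# outside this sketch.
#
#   from zhinst.utils.shfqa.multistate import QuditSettings
#   settings = QuditSettings([trace_g, trace_e])
#   device.qachannels[0].readout.multistate.qudits[0].configure(settings)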
class MultiState(ZINode):
"""MultiState node.
Implements basic functionality of the MultiState node.
Args:
root: Root of the nodetree.
tree: Tree (node path as tuple) of the current node.
serial: Serial of the device.
index: Index of the corresponding readout channel.
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "multistate", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
if self._tk_object.qudits:
channel_list = ZIChannelList(
self,
"qudits",
Qudit,
zi_node=self._tk_object.qudits.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.qudits):
channel_list.append(
Qudit(
self,
x,
i,
zi_node=self._tk_object.qudits[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("qudits", channel_list)
def get_qudits_results(self) -> Dict[int, np.ndarray]:
"""Downloads the qudit results from the device and group them by qudit.
This function accesses the multistate nodes to determine which
integrators were used for which qudit to able to group the results by
qudit.
Returns:
A dictionary with the qudit index keys and result vector values.
"""
return self._tk_object.get_qudits_results()
class Readout(ZINode):
"""Readout node.
Implements basic functionality of the readout, e.g. allowing the user to
write the integration weight.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
serial: Serial of the device.
index: Index of the corresponding awg channel
max_qubits_per_channel: Max qubits per channel
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "readout", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
if self._tk_object.multistate:
self.add_submodule(
"multistate",
MultiState(
self,
self._tk_object.multistate,
zi_node=self._tk_object.multistate.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def configure_result_logger(
self,
*,
result_source: str,
result_length: int,
num_averages: int = 1,
averaging_mode: AveragingMode = AveragingMode.CYCLIC,
) -> None:
"""Configures the result logger for readout mode.
Args:
result_source: String-based tag to select the result source in readout
mode, e.g. "result_of_integration" or "result_of_discrimination".
result_length: Number of results to be returned by the result logger
num_averages: Number of averages, will be rounded to 2^n
averaging_mode: Select the averaging order of the result, with
0 = cyclic and 1 = sequential.
"""
return self._tk_object.configure_result_logger(
result_source=result_source,
result_length=result_length,
num_averages=num_averages,
averaging_mode=averaging_mode,
)
def run(self) -> None:
"""Reset and enable the result logger."""
return self._tk_object.run()
def stop(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Stop the result logger.
Args:
timeout: The maximum waiting time in seconds for the Readout
(default: 10).
sleep_time: Sleep interval in seconds. (default = 0.05)
Raises:
TimeoutError: The result logger could not be stopped within the
given time.
"""
return self._tk_object.stop(timeout=timeout, sleep_time=sleep_time)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Wait until the readout is finished.
Args:
timeout: The maximum waiting time in seconds for the Readout
(default: 10).
sleep_time: Sleep interval in seconds. (default = 0.05)
Raises:
TimeoutError: if the readout recording is not completed within the
given time.
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def read(self, *, timeout: float = 10) -> np.ndarray:
"""Waits until the logger finished recording and returns the measured data.
Args:
timeout: Maximum time to wait for data in seconds (default = 10s)
Returns:
Result logger data.
"""
return self._tk_object.read(timeout=timeout)
def write_integration_weights(
self,
weights: Union[Waveforms, dict],
*,
integration_delay: float = 0.0,
clear_existing: bool = True,
) -> None:
"""Configures the weighted integration.
Args:
weights: Dictionary containing the complex weight vectors, where
keys correspond to the indices of the integration units to be
configured.
integration_delay: Delay in seconds before starting the readout.
(default = 0.0)
clear_existing: Flag whether to clear the waveform memory before
the present upload. (default = True)
"""
return self._tk_object.write_integration_weights(
weights=weights,
integration_delay=integration_delay,
clear_existing=clear_existing,
)
def read_integration_weights(self, slots: List[int] = None) -> Waveforms:
"""Read integration weights from the waveform memory.
Args:
slots: List of weight slots to read from the device. If not specified
all available weights will be downloaded.
Returns:
Mutable mapping of the downloaded weights.
"""
return self._tk_object.read_integration_weights(slots=slots)
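# Example (sketch): a typical readout flow with weighted integration. The
# matched-filter weight built from reference traces, the result length, and
# the delay are illustrative; `readout` stands for a hypothetical
# `device.qachannels[0].readout`.
#
#   weights = Waveforms()
#   weights[0] = np.conj(trace_e - trace_g)  # matched filter from ref traces
#   readout.write_integration_weights(weights, integration_delay=200e-9)
#   readout.configure_result_logger(
#       result_source="result_of_integration", result_length=100)
#   readout.run()
#   results = readout.read()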
class Spectroscopy(ZINode):
"""Spectroscopy node.
Implements basic functionality of the spectroscopy, e.g. allowing the user to
read the result logger data.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
serial: Serial of the device.
index: Index of the corresponding awg channel
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "spectroscopy", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
def configure_result_logger(
self,
*,
result_length: int,
num_averages: int = 1,
averaging_mode: AveragingMode = AveragingMode.CYCLIC,
) -> None:
"""Configures the result logger for spectroscopy mode.
Args:
result_length: Number of results to be returned by the result logger
num_averages: Number of averages, will be rounded to 2^n.
averaging_mode: Averaging order of the result.
"""
return self._tk_object.configure_result_logger(
result_length=result_length,
num_averages=num_averages,
averaging_mode=averaging_mode,
)
def run(self) -> None:
"""Resets and enables the spectroscopy result logger."""
return self._tk_object.run()
def stop(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Stop the result logger.
Args:
timeout: The maximum waiting time in seconds for the
Spectroscopy (default: 10).
sleep_time: Time in seconds to wait between
requesting Spectroscopy state
Raises:
TimeoutError: If the result logger could not be stopped within the
given time.
"""
return self._tk_object.stop(timeout=timeout, sleep_time=sleep_time)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Wait until spectroscopy is finished.
Args:
timeout (float): The maximum waiting time in seconds for the
Spectroscopy (default: 10).
sleep_time (float): Time in seconds to wait between
requesting Spectroscopy state
Raises:
TimeoutError: if the spectroscopy recording is not completed within the
given time.
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def read(self, *, timeout: float = 10) -> np.ndarray:
"""Waits until the logger finished recording and returns the measured data.
Args:
timeout: Maximum time to wait for data in seconds (default = 10s)
Returns:
An array containing the result logger data.
"""
return self._tk_object.read(timeout=timeout)
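# Example (sketch): a minimal spectroscopy measurement using this node.
# `spectroscopy` stands for a hypothetical `device.qachannels[0].spectroscopy`
# and the result length is arbitrary.
#
#   spectroscopy.configure_result_logger(result_length=1)
#   spectroscopy.run()
#   result = spectroscopy.read(timeout=10)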
class QAChannel(ZINode):
"""Quantum Analyzer Channel for the SHFQA.
:class:`QAChannel` implements basic functionality to configure QAChannel
settings of the :class:`SHFQA` instrument.
Besides the :class:`Generator`, :class:`Readout` and :class:`Spectroscopy`
modules it also provides easy access to commonly used `QAChannel` parameters.
Args:
device: SHFQA device object.
session: Underlying session.
tree: Node tree (node path as tuple) of the corresponding node.
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self,
parent,
f"qachannel_{index}",
snapshot_cache=snapshot_cache,
zi_node=zi_node,
)
self._tk_object = tk_object
if self._tk_object.generator:
self.add_submodule(
"generator",
Generator(
self,
self._tk_object.generator,
zi_node=self._tk_object.generator.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
if self._tk_object.readout:
self.add_submodule(
"readout",
Readout(
self,
self._tk_object.readout,
zi_node=self._tk_object.readout.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
if self._tk_object.spectroscopy:
self.add_submodule(
"spectroscopy",
Spectroscopy(
self,
self._tk_object.spectroscopy,
zi_node=self._tk_object.spectroscopy.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def configure_channel(
self,
*,
input_range: int,
output_range: int,
center_frequency: float,
mode: SHFQAChannelMode,
) -> None:
"""Configures the RF input and output of a specified channel.
Args:
input_range: Maximal range of the signal input power in dBm
output_range: Maximal range of the signal output power in dBm
center_frequency: Center frequency of the analysis band [Hz]
mode: Select between spectroscopy and readout mode.
"""
return self._tk_object.configure_channel(
input_range=input_range,
output_range=output_range,
center_frequency=center_frequency,
mode=mode,
)
class SHFScope(ZINode):
"""SHFQA Scope Node.
Implements basic functionality of the scope node, e.g. allowing the user to
read the data.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
daq_server: Instance of the ziDAQServer
serial: Serial of the device.
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self,
parent,
f"shfscope_{index}",
snapshot_cache=snapshot_cache,
zi_node=zi_node,
)
self._tk_object = tk_object
def run(
self, *, single: bool = True, timeout: float = 10, sleep_time: float = 0.005
) -> None:
"""Run the scope recording.
Args:
single: Flag if the scope should run in single-shot mode.
(default = True)
timeout: The maximum waiting time in seconds for the Scope
(default = 10).
sleep_time: Time in seconds to wait between requesting the progress
and records values (default = 0.005).
Raises:
TimeoutError: The scope did not start within the specified
timeout.
"""
return self._tk_object.run(
single=single, timeout=timeout, sleep_time=sleep_time
)
def stop(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Stop the scope recording.
Args:
timeout: The maximum waiting time in seconds for the scope
(default = 10).
sleep_time: Time in seconds to wait between requesting the progress
and records values (default = 0.005).
Raises:
TimeoutError: The scope did not stop within the specified
timeout.
"""
return self._tk_object.stop(timeout=timeout, sleep_time=sleep_time)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the scope recording is finished.
Args:
timeout: The maximum waiting time in seconds for the Scope
(default = 10).
sleep_time: Time in seconds to wait between requesting the progress
and records values (default = 0.005).
Raises:
TimeoutError: The scope did not finish within the specified
timeout.
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def configure(
self,
*,
input_select: Dict[int, str],
num_samples: int,
trigger_input: str,
num_segments: int = 1,
num_averages: int = 1,
trigger_delay: float = 0,
) -> None:
"""Configures the scope for a measurement.
Args:
input_select: Map of scope channels to their signal source,
e.g. "channel0_signal_input". (For a list of available
values use `available_inputs`.)
num_samples: Number of samples to record in a scope shot.
trigger_input: Specifies the trigger source of the scope
acquisition - if set to None, the self-triggering mode of the
scope becomes active, which is useful e.g. for the GUI.
For a list of available trigger values use
`available_trigger_inputs`.
num_segments: Number of distinct scope shots to be returned after
ending the acquisition.
num_averages: Specifies how many times each segment should be
averaged on hardware; to finish a scope acquisition, the number
of issued triggers must be equal to num_segments * num_averages.
trigger_delay: delay in samples specifying the time between the
start of data acquisition and reception of a trigger.
"""
return self._tk_object.configure(
input_select=input_select,
num_samples=num_samples,
trigger_input=trigger_input,
num_segments=num_segments,
num_averages=num_averages,
trigger_delay=trigger_delay,
)
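# Example (sketch): configuring the scope for one averaged segment. The input
# and trigger aliases are illustrative; query `available_inputs` and
# `available_trigger_inputs` for the values valid on a given setup.
#
#   scope.configure(
#       input_select={0: "channel0_signal_input"},
#       num_samples=2 ** 12,
#       trigger_input="channel0_sequencer_monitor0",
#       num_segments=1,
#       num_averages=4,
#       trigger_delay=0,
#   )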
def read(self, *, timeout: float = 10) -> tuple:
"""Read out the recorded data from the scope.
Args:
timeout: The maximum waiting time in seconds for the
Scope (default: 10).
Returns:
(recorded_data, recorded_data_range, scope_time)
Raises:
TimeoutError: if the scope recording is not completed before
timeout.
"""
return self._tk_object.read(timeout=timeout)
@property
def available_trigger_inputs(self) -> List:
"""List of the available trigger sources for the scope."""
return self._tk_object.available_trigger_inputs
@property
def available_inputs(self) -> List:
"""List of the available signal sources for the scope channels."""
return self._tk_object.available_inputs
class SHFQA(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments SHFQA."""
def _init_additional_nodes(self):
"""Init class specific modules and parameters."""
if self._tk_object.qachannels:
channel_list = ZIChannelList(
self,
"qachannels",
QAChannel,
zi_node=self._tk_object.qachannels.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.qachannels):
channel_list.append(
QAChannel(
self,
x,
i,
zi_node=self._tk_object.qachannels[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("qachannels", channel_list)
if self._tk_object.scopes:
channel_list = ZIChannelList(
self,
"scopes",
SHFScope,
zi_node=self._tk_object.scopes.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.scopes):
channel_list.append(
SHFScope(
self,
x,
i,
zi_node=self._tk_object.scopes[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("scopes", channel_list)
def factory_reset(self, *, deep: bool = True) -> None:
"""Load the factory default settings.
Args:
deep: A flag that specifies if a synchronization
should be performed between the device and the data
server after loading the factory preset (default: True).
"""
return self._tk_object.factory_reset(deep=deep)
def start_continuous_sw_trigger(
self, *, num_triggers: int, wait_time: float
) -> None:
"""Issues a specified number of software triggers.
Issues a specified number of software triggers with a certain wait time
in between. The function guarantees reception and proper processing of
all triggers by the device, but the time between triggers is
non-deterministic by nature of software triggering. Only use this
function for prototyping and/or cases without strong timing requirements.
Args:
num_triggers: Number of triggers to be issued
wait_time: Time between triggers in seconds
"""
return self._tk_object.start_continuous_sw_trigger(
num_triggers=num_triggers, wait_time=wait_time
)
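# Example (sketch): finishing a scope acquisition with software triggers. As
# noted in `SHFScope.configure`, num_segments * num_averages triggers are
# required; the values here are illustrative.
#
#   device.start_continuous_sw_trigger(num_triggers=1 * 4, wait_time=2e-3)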
@property
def max_qubits_per_channel(self) -> int:
"""Maximum number of supported qubits per channel."""
return self._tk_object.max_qubits_per_channel

# File: zhinst/qcodes/driver/devices/uhfli.py
from typing import Any, Dict, List, Tuple, Union
from zhinst.toolkit import CommandTable, Waveforms, Sequence
from zhinst.qcodes.driver.devices.base import ZIBaseInstrument
from zhinst.qcodes.qcodes_adaptions import ZINode, ZIChannelList
class CommandTableNode(ZINode):
"""CommandTable node.
This class implements the basic functionality of the command table allowing
the user to load and upload their own command table.
A dedicated class called ``CommandTable`` exists that is the preferred way
to create a valid command table. For more information about the
``CommandTable`` refer to the corresponding example or the documentation
of that class directly.
Args:
root: Node used for the upload of the command table
tree: Tree (node path as tuple) of the current node
device_type: Device type.
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "commandtable", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
def check_status(self) -> bool:
"""Check status of the command table.
Returns:
Flag if a valid command table is loaded into the device.
Raises:
RuntimeError: If the command table upload into the device failed.
"""
return self._tk_object.check_status()
def load_validation_schema(self) -> Dict[str, Any]:
"""Load device command table validation schema.
Returns:
JSON validation schema for the device command tables.
"""
return self._tk_object.load_validation_schema()
def upload_to_device(
self,
ct: Union[CommandTable, str, dict],
*,
validate: bool = False,
check_upload: bool = True,
) -> None:
"""Upload command table into the device.
The command table can either be specified through the dedicated
``CommandTable`` class or in a raw format, meaning a json string or json
dict. In the case of a json string or dict the command table is
validated by default against the schema provided by the device.
Args:
ct: Command table.
validate: Flag if the command table should be validated. (Only
applies if the command table is passed as a raw json string or
json dict)
check_upload: Flag if the upload should be validated by calling
`check_status`. This is not mandatory but strongly recommended
since the device does not raise an error when it rejects the
command table. This flag is ignored when called from within a
transaction.
Raises:
RuntimeError: If the command table upload into the device failed.
zhinst.toolkit.exceptions.ValidationError: Incorrect schema.
.. versionchanged:: 0.4.2
New flag `check_upload` that makes the upload check optional.
`check_status` is only called when not in an ongoing transaction.
"""
return self._tk_object.upload_to_device(
ct=ct, validate=validate, check_upload=check_upload
)
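# Example (sketch): building a table with the toolkit `CommandTable` class
# and uploading it. The single entry is a minimal illustration; `awg` stands
# for a hypothetical `device.awgs[0]`.
#
#   from zhinst.toolkit import CommandTable
#   ct = CommandTable(awg.commandtable.load_validation_schema())
#   ct.table[0].waveform.index = 0
#   awg.commandtable.upload_to_device(ct)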
def load_from_device(self) -> CommandTable:
"""Load command table from the device.
Returns:
command table.
"""
return self._tk_object.load_from_device()
class AWG(ZINode):
"""AWG node.
This class implements the basic functionality for the device specific
arbitrary waveform generator.
Besides the upload/compilation of sequences it offers the upload of
waveforms and command tables.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
session: Underlying session.
serial: Serial of the device.
index: Index of the corresponding awg channel
device_type: Device type
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, f"awg_{index}", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
if self._tk_object.commandtable:
self.add_submodule(
"commandtable",
CommandTableNode(
self,
self._tk_object.commandtable,
zi_node=self._tk_object.commandtable.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def enable_sequencer(self, *, single: bool) -> None:
"""Starts the sequencer of a specific channel.
Warning:
This function is synchronous and blocks until the sequencer is enabled.
When working with multiple instruments this function is the wrong
approach and the sequencer should be enabled asynchronously.
(For more information please take a look at the awg example in the
toolkit documentation.)
Args:
single: Flag if the sequencer should be disabled after finishing
execution.
Raises:
RuntimeError: If the sequencer could not be enabled.
.. versionchanged:: 0.5.0
Check the acknowledged value instead of using `wait_for_state_change`.
"""
return self._tk_object.enable_sequencer(single=single)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the AWG is finished.
Args:
timeout: The maximum waiting time in seconds for the generator
(default: 10).
sleep_time: Time in seconds to wait between requesting generator
state
Raises:
RuntimeError: If continuous mode is enabled
TimeoutError: If the sequencer program did not finish within
the specified timeout time
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def compile_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Tuple[bytes, Dict[str, Any]]:
"""Compiles a sequencer program for the specific device.
Args:
sequencer_program: The sequencer program to compile.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
the device. It is recommended passing the samplerate if more
than one sequencer code is uploaded in a row to speed up the
execution time.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Returns:
elf: Binary ELF data for sequencer.
extra: Extra dictionary with compiler output.
Examples:
>>> elf, compile_info = device.awgs[0].compile_sequencer_program(seqc)
>>> device.awgs[0].elf.data(elf)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the compilation failed.
.. versionadded:: 0.4.0
"""
return self._tk_object.compile_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def load_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Dict[str, Any]:
"""Compiles the given sequencer program on the AWG Core.
Warning:
After uploading the sequencer program one needs to wait for the
AWG core to become ready before it can be enabled.
The awg core indicates the ready state through its `ready` node.
(device.awgs[0].ready() == True)
Args:
sequencer_program: Sequencer program to be uploaded.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
the device. It is recommended passing the samplerate if more
than one sequencer code is uploaded in a row to speed up the
execution time.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Examples:
>>> compile_info = device.awgs[0].load_sequencer_program(seqc)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the upload or compilation failed.
.. versionadded:: 0.3.4
`sequencer_program` does not accept empty strings
.. versionadded:: 0.4.0
Use offline compiler instead of AWG module to compile the sequencer
program. This speeds up the compilation and also enables parallel
compilation/upload.
"""
return self._tk_object.load_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def write_to_waveform_memory(
self, waveforms: Waveforms, indexes: list = None
) -> None:
"""Writes waveforms to the waveform memory.
The waveforms must already be assigned in the sequencer program.
Args:
waveforms: Waveforms that should be uploaded.
indexes: Specify a list of indexes that should be uploaded. If
nothing is specified all available indexes in waveforms will
be uploaded. (default = None)
.. versionchanged:: 0.4.2
Removed `validate` flag and functionality. The validation check is
now done in the `Waveforms.validate` function.
"""
return self._tk_object.write_to_waveform_memory(
waveforms=waveforms, indexes=indexes
)
def read_from_waveform_memory(self, indexes: List[int] = None) -> Waveforms:
"""Read waveforms from the waveform memory.
Args:
indexes: List of waveform indexes to read from the device. If not
specified all assigned waveforms will be downloaded.
Returns:
Waveform object with the downloaded waveforms.
"""
return self._tk_object.read_from_waveform_memory(indexes=indexes)
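# Example (sketch): declaring a waveform placeholder in seqC and filling it
# afterwards, since uploaded waveforms must already be assigned in the
# sequencer program. Sizes and indices are arbitrary; `awg` stands for a
# hypothetical `device.awgs[0]`.
#
#   import numpy as np
#   from zhinst.toolkit import Waveforms
#   seqc = "wave w = placeholder(1008); assignWaveIndex(w, 0); playWave(w);"
#   awg.load_sequencer_program(seqc)
#   waves = Waveforms()
#   waves[0] = np.sin(np.linspace(0, 2 * np.pi, 1008))
#   awg.write_to_waveform_memory(waves)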
class UHFLI(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments UHFLI."""
def _init_additional_nodes(self):
"""Init class specific modules and parameters."""
if self._tk_object.awgs:
channel_list = ZIChannelList(
self,
"awgs",
AWG,
zi_node=self._tk_object.awgs.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.awgs):
channel_list.append(
AWG(
self,
x,
i,
zi_node=self._tk_object.awgs[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("awgs", channel_list) | zhinst-qcodes | /zhinst_qcodes-0.5.2-py3-none-any.whl/zhinst/qcodes/driver/devices/uhfli.py | uhfli.py |
from typing import Any, Dict, List, Tuple, Union
from zhinst.toolkit import CommandTable, Waveforms, Sequence
from zhinst.qcodes.driver.devices.base import ZIBaseInstrument
from zhinst.qcodes.qcodes_adaptions import ZINode, ZIChannelList
class CommandTableNode(ZINode):
"""CommandTable node.
This class implements the basic functionality of the command table allowing
the user to load and upload their own command table.
A dedicated class called ``CommandTable`` exists that is the preferred way
to create a valid command table. For more information about the
``CommandTable`` refer to the corresponding example or the documentation
of that class directly.
Args:
root: Node used for the upload of the command table
tree: Tree (node path as tuple) of the current node
device_type: Device type.
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "commandtable", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
def check_status(self) -> bool:
"""Check status of the command table.
Returns:
Flag if a valid command table is loaded into the device.
Raises:
RuntimeError: If the command table upload into the device failed.
"""
return self._tk_object.check_status()
def load_validation_schema(self) -> Dict[str, Any]:
"""Load device command table validation schema.
Returns:
JSON validation schema for the device command tables.
"""
return self._tk_object.load_validation_schema()
def upload_to_device(
self,
ct: Union[CommandTable, str, dict],
*,
validate: bool = False,
check_upload: bool = True,
) -> None:
"""Upload command table into the device.
The command table can either be specified through the dedicated
``CommandTable`` class or in a raw format, meaning a json string or json
dict. In the case of a json string or dict the command table is
validated by default against the schema provided by the device.
Args:
ct: Command table.
validate: Flag if the command table should be validated. (Only
applies if the command table is passed as a raw json string or
json dict)
check_upload: Flag if the upload should be validated by calling
`check_status`. This is not mandatory but strongly recommended
since the device does not raise an error when it rejects the
command table. This flag is ignored when called from within a
transaction.
Raises:
RuntimeError: If the command table upload into the device failed.
zhinst.toolkit.exceptions.ValidationError: Incorrect schema.
.. versionchanged:: 0.4.2
New flag `check_upload` that makes the upload check optional.
`check_status` is only called when not in an ongoing transaction.
"""
return self._tk_object.upload_to_device(
ct=ct, validate=validate, check_upload=check_upload
)
def load_from_device(self) -> CommandTable:
"""Load command table from the device.
Returns:
command table.
"""
return self._tk_object.load_from_device()
class AWGCore(ZINode):
"""AWG Core Node."""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "awg", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
if self._tk_object.commandtable:
self.add_submodule(
"commandtable",
CommandTableNode(
self,
self._tk_object.commandtable,
zi_node=self._tk_object.commandtable.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def enable_sequencer(self, *, single: bool) -> None:
"""Starts the sequencer of a specific channel.
Warning:
This function is synchronous and blocks until the sequencer is enabled.
When working with multiple instruments this function is the wrong
approach and the sequencer should be enabled asynchronously.
(For more information please take a look at the awg example in the
toolkit documentation.)
Args:
single: Flag if the sequencer should be disabled after finishing
execution.
Raises:
RuntimeError: If the sequencer could not be enabled.
.. versionchanged:: 0.5.0
Check the acknowledged value instead of using `wait_for_state_change`.
"""
return self._tk_object.enable_sequencer(single=single)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the AWG is finished.
Args:
timeout: The maximum waiting time in seconds for the generator
(default: 10).
sleep_time: Time in seconds to wait between requesting generator
state
Raises:
RuntimeError: If continuous mode is enabled
TimeoutError: If the sequencer program did not finish within
the specified timeout time
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def compile_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Tuple[bytes, Dict[str, Any]]:
"""Compiles a sequencer program for the specific device.
Args:
sequencer_program: The sequencer program to compile.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
the device. It is recommended passing the samplerate if more
than one sequencer code is uploaded in a row to speed up the
execution time.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Returns:
elf: Binary ELF data for sequencer.
extra: Extra dictionary with compiler output.
Examples:
>>> elf, compile_info = device.awgs[0].compile_sequencer_program(seqc)
>>> device.awgs[0].elf.data(elf)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the compilation failed.
.. versionadded:: 0.4.0
"""
return self._tk_object.compile_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def load_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Dict[str, Any]:
"""Compiles the given sequencer program on the AWG Core.
Warning:
After uploading the sequencer program one needs to wait for the
AWG core to become ready before it can be enabled.
The awg core indicates the ready state through its `ready` node.
(device.awgs[0].ready() == True)
Args:
sequencer_program: Sequencer program to be uploaded.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
the device. It is recommended passing the samplerate if more
than one sequencer code is uploaded in a row to speed up the
execution time.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Examples:
>>> compile_info = device.awgs[0].load_sequencer_program(seqc)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the upload or compilation failed.
.. versionadded:: 0.3.4
`sequencer_program` does not accept empty strings
.. versionadded:: 0.4.0
Use offline compiler instead of AWG module to compile the sequencer
program. This speeds up the compilation and also enables parallel
compilation/upload.
"""
return self._tk_object.load_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def write_to_waveform_memory(
self, waveforms: Waveforms, indexes: list = None
) -> None:
"""Writes waveforms to the waveform memory.
The waveforms must already be assigned in the sequencer program.
Args:
waveforms: Waveforms that should be uploaded.
indexes: Specify a list of indexes that should be uploaded. If
nothing is specified all available indexes in waveforms will
be uploaded. (default = None)
.. versionchanged:: 0.4.2
Removed `validate` flag and functionality. The validation check is
now done in the `Waveforms.validate` function.
"""
return self._tk_object.write_to_waveform_memory(
waveforms=waveforms, indexes=indexes
)
def read_from_waveform_memory(self, indexes: List[int] = None) -> Waveforms:
"""Read waveforms from the waveform memory.
Args:
indexes: List of waveform indexes to read from the device. If not
specified all assigned waveforms will be downloaded.
Returns:
Waveform object with the downloaded waveforms.
"""
return self._tk_object.read_from_waveform_memory(indexes=indexes)
def configure_marker_and_trigger(
self, *, trigger_in_source: str, trigger_in_slope: str, marker_out_source: str
) -> None:
"""Configures the trigger inputs and marker outputs of the AWG.
Args:
trigger_in_source: Alias for the trigger input used by the
sequencer. For a list of available values use:
`available_trigger_inputs`
trigger_in_slope: Alias for the slope of the input trigger
used by the sequencer. For a list of available values use
`available_trigger_slopes`
marker_out_source: Alias for the marker output source used by
the sequencer. For a list of available values use
`available_marker_outputs`
"""
return self._tk_object.configure_marker_and_trigger(
trigger_in_source=trigger_in_source,
trigger_in_slope=trigger_in_slope,
marker_out_source=marker_out_source,
)
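# Example (sketch): wiring the sequencer trigger and marker. The alias values
# below are illustrative only; query the three `available_*` properties
# defined next for the values supported by the connected device.
#
#   awg.configure_marker_and_trigger(
#       trigger_in_source="trigin0",
#       trigger_in_slope="rising_edge",
#       marker_out_source="awg_trigger0",
#   )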
@property
def available_trigger_inputs(self) -> List:
"""List the available trigger sources for the sequencer."""
return self._tk_object.available_trigger_inputs
@property
def available_trigger_slopes(self) -> List:
"""List the available trigger slopes for the sequencer."""
return self._tk_object.available_trigger_slopes
@property
def available_marker_outputs(self) -> List:
"""List the available trigger marker outputs for the sequencer."""
return self._tk_object.available_marker_outputs
class SGChannel(ZINode):
"""Signal Generator Channel for the SHFSG.
:class:`SGChannel` implements basic functionality to configure SGChannel
settings of the :class:`SHFSG` instrument.
Args:
device: SHFQA device object.
session: Underlying session.
tree: Node tree (node path as tuple) of the corresponding node.
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self,
parent,
f"sgchannel_{index}",
snapshot_cache=snapshot_cache,
zi_node=zi_node,
)
self._tk_object = tk_object
if self._tk_object.awg:
self.add_submodule(
"awg",
AWGCore(
self,
self._tk_object.awg,
zi_node=self._tk_object.awg.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def configure_channel(
self, *, enable: bool, output_range: int, center_frequency: float, rf_path: bool
) -> None:
"""Configures the RF input and output.
Args:
enable: Flag if the signal output should be enabled.
output_range: Maximal range of the signal output power in dBm
center_frequency: Center frequency before modulation
rf_path: Flag if the RF(True) or LF(False) path should be
configured.
"""
return self._tk_object.configure_channel(
enable=enable,
output_range=output_range,
center_frequency=center_frequency,
rf_path=rf_path,
)
def configure_pulse_modulation(
self,
*,
enable: bool,
osc_index: int = 0,
osc_frequency: float = 100000000.0,
phase: float = 0.0,
global_amp: float = 0.5,
gains: tuple = (1.0, -1.0, 1.0, 1.0),
sine_generator_index: int = 0,
) -> None:
"""Configure the pulse modulation.
Configures the sine generator to digitally modulate the AWG output, for
generating single sideband AWG signals
Args:
enable: Flag if the modulation should be enabled.
osc_index: Selects which oscillator to use
osc_frequency: Oscillator frequency used to modulate the AWG
outputs. (default = 100e6)
phase: Sets the oscillator phase. (default = 0.0)
global_amp: Global scale factor for the AWG outputs. (default = 0.5)
gains: Sets the four amplitudes used for single sideband generation.
Default values correspond to upper sideband with a positive
oscillator frequency. (default = (1.0, -1.0, 1.0, 1.0))
sine_generator_index: Selects which sine generator to use on a
given channel.
"""
return self._tk_object.configure_pulse_modulation(
enable=enable,
osc_index=osc_index,
osc_frequency=osc_frequency,
phase=phase,
global_amp=global_amp,
gains=gains,
sine_generator_index=sine_generator_index,
)
def configure_sine_generation(
self,
*,
enable: bool,
osc_index: int = 0,
osc_frequency: float = 100000000.0,
phase: float = 0.0,
gains: tuple = (0.0, 1.0, 1.0, 0.0),
sine_generator_index: int = 0,
) -> None:
"""Configures the sine generator output.
Configures the sine generator output of a specified channel for generating
continuous wave signals without the AWG.
Args:
enable: Flag if the sine generator output should be enabled.
osc_index: Selects which oscillator to use
osc_frequency: Oscillator frequency used by the sine generator
(default = 100e6)
phase: Sets the oscillator phase. (default = 0.0)
gains: Sets the four amplitudes used for single sideband
generation. Default values correspond to upper sideband with a
positive oscillator frequency.
Gains are set in the following order I/sin, I/cos, Q/sin, Q/cos.
(default = (0.0, 1.0, 1.0, 0.0))
sine_generator_index: Selects which sine generator to use on a given
channel
"""
return self._tk_object.configure_sine_generation(
enable=enable,
osc_index=osc_index,
osc_frequency=osc_frequency,
phase=phase,
gains=gains,
sine_generator_index=sine_generator_index,
)
@property
def awg_modulation_freq(self) -> float:
"""Modulation frequency of the AWG.
Depends on the selected oscillator.
"""
return self._tk_object.awg_modulation_freq
class SHFSG(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments SHFSG."""
def _init_additional_nodes(self):
"""Init class specific modules and parameters."""
if self._tk_object.sgchannels:
channel_list = ZIChannelList(
self,
"sgchannels",
SGChannel,
zi_node=self._tk_object.sgchannels.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.sgchannels):
channel_list.append(
SGChannel(
self,
x,
i,
zi_node=self._tk_object.sgchannels[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("sgchannels", channel_list)
def factory_reset(self, *, deep: bool = True) -> None:
"""Load the factory default settings.
Args:
deep: A flag that specifies if a synchronization
should be performed between the device and the data
server after loading the factory preset (default: True).
"""
return self._tk_object.factory_reset(deep=deep) | zhinst-qcodes | /zhinst_qcodes-0.5.2-py3-none-any.whl/zhinst/qcodes/driver/devices/shfsg.py | shfsg.py |
import typing as t
from zhinst.toolkit.driver.devices import DeviceType
from zhinst.qcodes.qcodes_adaptions import init_nodetree, ZIInstrument
if t.TYPE_CHECKING:
from zhinst.qcodes.session import ZISession, Session
from qcodes.instrument.base import Instrument
class ZIBaseInstrument(ZIInstrument):
"""Generic QCoDeS driver for a Zurich Instrument device.
    All device specific classes are derived from this class.
It implements common functions valid for all
devices.
    It can also be used directly, e.g. for instrument types that have no special
class in QCoDeS.
Args:
tk_object: Instance of the toolkit base instrument
name: Name of the instrument in qcodes. (default = "zi_{dev_type}_{serial}")
        raw: Flag if the QCoDeS instance should only be created with the
            nodes, without forwarding the toolkit functions. (default = False)
"""
def __init__(
self,
tk_object: DeviceType,
session: t.Union["ZISession", "Session", "Instrument"],
name: t.Optional[str] = None,
raw: bool = False,
):
self._tk_object = tk_object
self._session = session
if not name:
name = (
f"zi_{tk_object.__class__.__name__.lower()}_{tk_object.serial.lower()}"
)
super().__init__(name, self._tk_object.root)
if not raw:
self._init_additional_nodes()
init_nodetree(self, self._tk_object.root, self._snapshot_cache)
def get_idn(self) -> t.Dict[str, t.Optional[str]]:
"""Fake a standard VISA ``*IDN?`` response."""
return {
"vendor": "Zurich Instruments",
"model": self.device_type,
"serial": self.serial,
"firmware": self.system.fwrevision(),
}
def _init_additional_nodes(self) -> None:
"""Init additional qcodes parameter."""
def factory_reset(self, deep: bool = True) -> None:
"""Load the factory default settings.
        Args:
deep (bool): A flag that specifies if a synchronization
should be performed between the device and the data
server after loading the factory preset (default: True).
"""
return self._tk_object.factory_reset(deep=deep)
def check_compatibility(self) -> None:
"""Check if the software stack is compatible.
Only if all versions and revisions of the software stack match stability
can be ensured. The following criteria are checked:
* minimum required zhinst-utils package is installed
* minimum required zhinst-core package is installed
* zhinst package matches the LabOne Data Server version
* firmware revision matches the LabOne Data Server version
Raises:
ConnectionError: If the device is currently updating
RuntimeError: If one of the above mentioned criteria is not
fulfilled
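        Example (a minimal sketch; ``device`` stands for a connected
        instrument object):
            >>> try:
            ...     device.check_compatibility()
            ... except RuntimeError as error:
            ...     print(f"Incompatible software stack: {error}")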
"""
        self._tk_object.check_compatibility()
def get_streamingnodes(self) -> list:
"""Create a dictionary with all streaming nodes available."""
return self._tk_object.get_streamingnodes()
def set_transaction(self):
"""Context manager for a transactional set.
Can be used as a context in a with statement and bundles all node set
commands into a single transaction. This reduces the network overhead
and often increases the speed.
        Within the with block all set commands to nodes will be buffered
        and bundled into a single command at the end automatically.
(All other operations, e.g. getting the value of a node, will not be
affected)
Warning:
The set is always performed as deep set if called on device nodes.
Examples:
>>> with device.set_transaction():
device.test[0].a(1)
device.test[1].a(2)
"""
return self._tk_object.set_transaction()
@property
def serial(self) -> str:
"""Instrument specific serial."""
return self._tk_object.serial
@property
def device_type(self) -> str:
"""Type of the instrument (e.g. MFLI)."""
return self._tk_object.device_type
    @property
    def device_options(self) -> str:
"""Enabled options of the instrument."""
return self._tk_object.device_options
@property
def session(self) -> "Session":
"""Underlying session the device is connected through."""
return self._session | zhinst-qcodes | /zhinst_qcodes-0.5.2-py3-none-any.whl/zhinst/qcodes/driver/devices/base.py | base.py |
from typing import Any, Dict, List, Tuple, Union
import numpy as np
from zhinst.toolkit import CommandTable, Waveforms, Sequence
from zhinst.toolkit.interface import AveragingMode, SHFQAChannelMode
from zhinst.utils.shfqa.multistate import QuditSettings
from zhinst.qcodes.driver.devices.base import ZIBaseInstrument
from zhinst.qcodes.qcodes_adaptions import ZINode, ZIChannelList
class CommandTableNode(ZINode):
"""CommandTable node.
This class implements the basic functionality of the command table allowing
the user to load and upload their own command table.
A dedicated class called ``CommandTable`` exists that is the preferred way
to create a valid command table. For more information about the
``CommandTable`` refer to the corresponding example or the documentation
of that class directly.
Args:
root: Node used for the upload of the command table
tree: Tree (node path as tuple) of the current node
device_type: Device type.
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "commandtable", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
def check_status(self) -> bool:
"""Check status of the command table.
Returns:
Flag if a valid command table is loaded into the device.
Raises:
RuntimeError: If the command table upload into the device failed.
"""
return self._tk_object.check_status()
def load_validation_schema(self) -> Dict[str, Any]:
"""Load device command table validation schema.
Returns:
JSON validation schema for the device command tables.
"""
return self._tk_object.load_validation_schema()
def upload_to_device(
self,
ct: Union[CommandTable, str, dict],
*,
validate: bool = False,
check_upload: bool = True,
) -> None:
"""Upload command table into the device.
The command table can either be specified through the dedicated
``CommandTable`` class or in a raw format, meaning a json string or json
dict. In the case of a json string or dict the command table is
validated by default against the schema provided by the device.
Args:
ct: Command table.
validate: Flag if the command table should be validated. (Only
applies if the command table is passed as a raw json string or
json dict)
check_upload: Flag if the upload should be validated by calling
                `check_status`. This is not mandatory but strongly recommended
                since the device does not raise an error when it rejects the
                command table. This flag is ignored when called from within a
transaction.
Raises:
RuntimeError: If the command table upload into the device failed.
zhinst.toolkit.exceptions.ValidationError: Incorrect schema.
.. versionchanged:: 0.4.2
New Flag `check_upload` that makes the upload check optional.
            `check_status` is only called when not in an ongoing transaction.
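        Example (a minimal sketch; the ``awg`` object and the table entry
        are placeholders):
            >>> from zhinst.toolkit import CommandTable
            >>> ct = CommandTable(awg.commandtable.load_validation_schema())
            >>> ct.table[0].waveform.index = 0
            >>> awg.commandtable.upload_to_device(ct)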
"""
return self._tk_object.upload_to_device(
ct=ct, validate=validate, check_upload=check_upload
)
def load_from_device(self) -> CommandTable:
"""Load command table from the device.
Returns:
command table.
"""
return self._tk_object.load_from_device()
class AWGCore(ZINode):
"""AWG Core Node."""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "awg", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
if self._tk_object.commandtable:
self.add_submodule(
"commandtable",
CommandTableNode(
self,
self._tk_object.commandtable,
zi_node=self._tk_object.commandtable.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def enable_sequencer(self, *, single: bool) -> None:
"""Starts the sequencer of a specific channel.
Warning:
This function is synchronous and blocks until the sequencer is enabled.
When working with multiple instruments this function is the wrong
approach and the sequencer should be enabled asynchronously.
(For more information please take a look at the awg example in the
toolkit documentation.)
Args:
single: Flag if the sequencer should be disabled after finishing
execution.
Raises:
RuntimeError: If the sequencer could not be enabled.
.. versionchanged:: 0.5.0
Check the acknowledged value instead of using `wait_for_state_change`.
"""
return self._tk_object.enable_sequencer(single=single)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the AWG is finished.
Args:
timeout: The maximum waiting time in seconds for the generator
(default: 10).
sleep_time: Time in seconds to wait between requesting generator
state
Raises:
RuntimeError: If continuous mode is enabled
TimeoutError: If the sequencer program did not finish within
the specified timeout time
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def compile_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Tuple[bytes, Dict[str, Any]]:
"""Compiles a sequencer program for the specific device.
Args:
sequencer_program: The sequencer program to compile.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
                the device. It is recommended to pass the samplerate if more
                than one sequencer program is uploaded in a row, to speed up
                execution.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Returns:
elf: Binary ELF data for sequencer.
extra: Extra dictionary with compiler output.
Examples:
>>> elf, compile_info = device.awgs[0].compile_sequencer_program(seqc)
>>> device.awgs[0].elf.data(elf)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the compilation failed.
.. versionadded:: 0.4.0
"""
return self._tk_object.compile_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def load_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Dict[str, Any]:
"""Compiles the given sequencer program on the AWG Core.
Warning:
            After uploading the sequencer program one needs to wait for
            the AWG core to become ready before it can be enabled.
            The AWG core indicates the ready state through its `ready` node.
            (device.awgs[0].ready() == True)
Args:
sequencer_program: Sequencer program to be uploaded.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
                the device. It is recommended to pass the samplerate if more
                than one sequencer program is uploaded in a row, to speed up
                execution.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Examples:
>>> compile_info = device.awgs[0].load_sequencer_program(seqc)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the upload or compilation failed.
.. versionadded:: 0.3.4
`sequencer_program` does not accept empty strings
.. versionadded:: 0.4.0
Use offline compiler instead of AWG module to compile the sequencer
            program. This speeds up the compilation and also enables parallel
compilation/upload.
"""
return self._tk_object.load_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def write_to_waveform_memory(
self, waveforms: Waveforms, indexes: list = None
) -> None:
"""Writes waveforms to the waveform memory.
The waveforms must already be assigned in the sequencer program.
Args:
waveforms: Waveforms that should be uploaded.
indexes: Specify a list of indexes that should be uploaded. If
nothing is specified all available indexes in waveforms will
be uploaded. (default = None)
.. versionchanged:: 0.4.2
Removed `validate` flag and functionality. The validation check is
now done in the `Waveforms.validate` function.
"""
return self._tk_object.write_to_waveform_memory(
waveforms=waveforms, indexes=indexes
)
def read_from_waveform_memory(self, indexes: List[int] = None) -> Waveforms:
"""Read waveforms from the waveform memory.
Args:
indexes: List of waveform indexes to read from the device. If not
specified all assigned waveforms will be downloaded.
Returns:
Waveform object with the downloaded waveforms.
"""
return self._tk_object.read_from_waveform_memory(indexes=indexes)
def configure_marker_and_trigger(
self, *, trigger_in_source: str, trigger_in_slope: str, marker_out_source: str
) -> None:
"""Configures the trigger inputs and marker outputs of the AWG.
Args:
trigger_in_source: Alias for the trigger input used by the
sequencer. For a list of available values use:
`available_trigger_inputs`
            trigger_in_slope: Alias for the slope of the input trigger
                used by the sequencer. For a list of available values use
                `available_trigger_slopes`
            marker_out_source: Alias for the marker output source used by
                the sequencer. For a list of available values use
                `available_marker_outputs`
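        Example (a minimal sketch; the first entry of each ``available_*``
        property serves as a placeholder):
            >>> awg = device.sgchannels[0].awg
            >>> awg.configure_marker_and_trigger(
            ...     trigger_in_source=awg.available_trigger_inputs[0],
            ...     trigger_in_slope=awg.available_trigger_slopes[0],
            ...     marker_out_source=awg.available_marker_outputs[0],
            ... )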
"""
return self._tk_object.configure_marker_and_trigger(
trigger_in_source=trigger_in_source,
trigger_in_slope=trigger_in_slope,
marker_out_source=marker_out_source,
)
@property
def available_trigger_inputs(self) -> List:
"""List the available trigger sources for the sequencer."""
return self._tk_object.available_trigger_inputs
@property
def available_trigger_slopes(self) -> List:
"""List the available trigger slopes for the sequencer."""
return self._tk_object.available_trigger_slopes
@property
def available_marker_outputs(self) -> List:
"""List the available trigger marker outputs for the sequencer."""
return self._tk_object.available_marker_outputs
class SGChannel(ZINode):
"""Signal Generator Channel for the SHFSG.
:class:`SGChannel` implements basic functionality to configure SGChannel
settings of the :class:`SHFSG` instrument.
Args:
        device: SHFSG device object.
session: Underlying session.
tree: Node tree (node path as tuple) of the corresponding node.
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self,
parent,
f"sgchannel_{index}",
snapshot_cache=snapshot_cache,
zi_node=zi_node,
)
self._tk_object = tk_object
if self._tk_object.awg:
self.add_submodule(
"awg",
AWGCore(
self,
self._tk_object.awg,
zi_node=self._tk_object.awg.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def configure_channel(
self, *, enable: bool, output_range: int, center_frequency: float, rf_path: bool
) -> None:
"""Configures the RF input and output.
Args:
enable: Flag if the signal output should be enabled.
output_range: Maximal range of the signal output power in dBm
center_frequency: Center frequency before modulation
rf_path: Flag if the RF(True) or LF(False) path should be
configured.
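        Example (a minimal sketch; the channel index and all values are
        placeholders):
            >>> device.sgchannels[0].configure_channel(
            ...     enable=True,
            ...     output_range=0,
            ...     center_frequency=1e9,
            ...     rf_path=True,
            ... )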
"""
return self._tk_object.configure_channel(
enable=enable,
output_range=output_range,
center_frequency=center_frequency,
rf_path=rf_path,
)
def configure_pulse_modulation(
self,
*,
enable: bool,
osc_index: int = 0,
osc_frequency: float = 100000000.0,
phase: float = 0.0,
global_amp: float = 0.5,
gains: tuple = (1.0, -1.0, 1.0, 1.0),
sine_generator_index: int = 0,
) -> None:
"""Configure the pulse modulation.
Configures the sine generator to digitally modulate the AWG output, for
generating single sideband AWG signals
Args:
enable: Flag if the modulation should be enabled.
osc_index: Selects which oscillator to use
osc_frequency: Oscillator frequency used to modulate the AWG
outputs. (default = 100e6)
phase: Sets the oscillator phase. (default = 0.0)
global_amp: Global scale factor for the AWG outputs. (default = 0.5)
gains: Sets the four amplitudes used for single sideband generation.
Default values correspond to upper sideband with a positive
oscillator frequency. (default = (1.0, -1.0, 1.0, 1.0))
sine_generator_index: Selects which sine generator to use on a
given channel.
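        Example (a minimal sketch; oscillator index and frequency are
        placeholders):
            >>> device.sgchannels[0].configure_pulse_modulation(
            ...     enable=True,
            ...     osc_index=0,
            ...     osc_frequency=100e6,
            ... )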
"""
return self._tk_object.configure_pulse_modulation(
enable=enable,
osc_index=osc_index,
osc_frequency=osc_frequency,
phase=phase,
global_amp=global_amp,
gains=gains,
sine_generator_index=sine_generator_index,
)
def configure_sine_generation(
self,
*,
enable: bool,
osc_index: int = 0,
osc_frequency: float = 100000000.0,
phase: float = 0.0,
gains: tuple = (0.0, 1.0, 1.0, 0.0),
sine_generator_index: int = 0,
) -> None:
"""Configures the sine generator output.
Configures the sine generator output of a specified channel for generating
continuous wave signals without the AWG.
Args:
enable: Flag if the sine generator output should be enabled.
osc_index: Selects which oscillator to use
osc_frequency: Oscillator frequency used by the sine generator
(default = 100e6)
phase: Sets the oscillator phase. (default = 0.0)
gains: Sets the four amplitudes used for single sideband
generation. Default values correspond to upper sideband with a
positive oscillator frequency.
Gains are set in the following order I/sin, I/cos, Q/sin, Q/cos.
(default = (0.0, 1.0, 1.0, 0.0))
sine_generator_index: Selects which sine generator to use on a given
channel
"""
return self._tk_object.configure_sine_generation(
enable=enable,
osc_index=osc_index,
osc_frequency=osc_frequency,
phase=phase,
gains=gains,
sine_generator_index=sine_generator_index,
)
@property
def awg_modulation_freq(self) -> float:
"""Modulation frequency of the AWG.
Depends on the selected oscillator.
"""
return self._tk_object.awg_modulation_freq
class Generator(ZINode):
"""Generator node.
Implements basic functionality of the generator allowing the user to write
and upload their *'.seqC'* code.
    In contrast to other AWG sequencers, e.g. from the HDAWG or SHFSG,
    it does not provide write access to the waveform memories
    and hence does not come with predefined waveforms such as `gauss`
    or `ones`. Therefore, all waveforms need to be defined in Python
    and uploaded to the device using the `write_to_waveform_memory` method.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
daq_server: Instance of the ziDAQServer
serial: Serial of the device.
index: Index of the corresponding awg channel
max_qubits_per_channel: Max qubits per channel
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "generator", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
def enable_sequencer(self, *, single: bool) -> None:
"""Starts the sequencer of a specific channel.
Warning:
This function is synchronous and blocks until the sequencer is enabled.
When working with multiple instruments this function is the wrong
approach and the sequencer should be enabled asynchronously.
(For more information please take a look at the awg example in the
toolkit documentation.)
Args:
single: Flag if the sequencer should be disabled after finishing
execution.
Raises:
RuntimeError: If the sequencer could not be enabled.
.. versionchanged:: 0.5.0
Check the acknowledged value instead of using `wait_for_state_change`.
"""
return self._tk_object.enable_sequencer(single=single)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the AWG is finished.
Args:
timeout: The maximum waiting time in seconds for the generator
(default: 10).
sleep_time: Time in seconds to wait between requesting generator
state
Raises:
RuntimeError: If continuous mode is enabled
TimeoutError: If the sequencer program did not finish within
the specified timeout time
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def compile_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Tuple[bytes, Dict[str, Any]]:
"""Compiles a sequencer program for the specific device.
Args:
sequencer_program: The sequencer program to compile.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
                the device. It is recommended to pass the samplerate if more
                than one sequencer program is uploaded in a row, to speed up
                execution.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Returns:
elf: Binary ELF data for sequencer.
extra: Extra dictionary with compiler output.
Examples:
>>> elf, compile_info = device.awgs[0].compile_sequencer_program(seqc)
>>> device.awgs[0].elf.data(elf)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the compilation failed.
.. versionadded:: 0.4.0
"""
return self._tk_object.compile_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def load_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Dict[str, Any]:
"""Compiles the given sequencer program on the AWG Core.
Warning:
            After uploading the sequencer program one needs to wait for
            the AWG core to become ready before it can be enabled.
            The AWG core indicates the ready state through its `ready` node.
            (device.awgs[0].ready() == True)
Args:
sequencer_program: Sequencer program to be uploaded.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
                the device. It is recommended to pass the samplerate if more
                than one sequencer program is uploaded in a row, to speed up
                execution.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Examples:
>>> compile_info = device.awgs[0].load_sequencer_program(seqc)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the upload or compilation failed.
.. versionadded:: 0.3.4
`sequencer_program` does not accept empty strings
.. versionadded:: 0.4.0
Use offline compiler instead of AWG module to compile the sequencer
            program. This speeds up the compilation and also enables parallel
compilation/upload.
"""
return self._tk_object.load_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def write_to_waveform_memory(
self, pulses: Union[Waveforms, dict], *, clear_existing: bool = True
) -> None:
"""Writes pulses to the waveform memory.
Args:
pulses: Waveforms that should be uploaded.
clear_existing: Flag whether to clear the waveform memory before the
present upload. (default = True)
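        Example (a minimal sketch; slot 0 and the pulse shape are
        assumptions):
            >>> import numpy as np
            >>> from zhinst.toolkit import Waveforms
            >>> pulses = Waveforms()
            >>> pulses[0] = 0.5 * np.ones(1000, dtype=complex)
            >>> device.qachannels[0].generator.write_to_waveform_memory(pulses)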
"""
return self._tk_object.write_to_waveform_memory(
pulses=pulses, clear_existing=clear_existing
)
def read_from_waveform_memory(self, slots: List[int] = None) -> Waveforms:
"""Read pulses from the waveform memory.
Args:
slots: List of waveform indexes to read from the device. If not
specified all assigned waveforms will be downloaded.
Returns:
Mutable mapping of the downloaded waveforms.
"""
return self._tk_object.read_from_waveform_memory(slots=slots)
def configure_sequencer_triggering(
self, *, aux_trigger: str, play_pulse_delay: float = 0.0
) -> None:
"""Configure the sequencer triggering.
Args:
aux_trigger: Alias for the trigger source used in the sequencer.
For the list of available values, use `available_aux_trigger_inputs`
play_pulse_delay: Delay in seconds before the start of waveform playback.
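        Example (a minimal sketch; the first available aux trigger serves
        as a placeholder):
            >>> generator = device.qachannels[0].generator
            >>> generator.configure_sequencer_triggering(
            ...     aux_trigger=generator.available_aux_trigger_inputs[0],
            ... )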
"""
return self._tk_object.configure_sequencer_triggering(
aux_trigger=aux_trigger, play_pulse_delay=play_pulse_delay
)
@property
def available_aux_trigger_inputs(self) -> List:
"""List of available aux trigger sources for the generator."""
return self._tk_object.available_aux_trigger_inputs
class Qudit(ZINode):
"""Single Qudit node.
Implements basic functionality of a single qudit node, e.g applying the
basic configuration.
Args:
root: Root of the nodetree.
tree: Tree (node path as tuple) of the current node.
serial: Serial of the device.
readout_channel: Index of the readout channel this qudit belongs to.
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self,
parent,
f"qudit_{index}",
snapshot_cache=snapshot_cache,
zi_node=zi_node,
)
self._tk_object = tk_object
def configure(self, qudit_settings: QuditSettings, enable: bool = True) -> None:
"""Compiles a list of transactions to apply the qudit settings to the device.
Args:
qudit_settings: The qudit settings to be configured.
enable: Whether to enable the qudit. (default: True)
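        Example (a minimal sketch; ``trace_g``, ``trace_e`` and ``trace_f``
        are placeholder numpy arrays holding one reference trace per state):
            >>> from zhinst.utils.shfqa.multistate import QuditSettings
            >>> settings = QuditSettings([trace_g, trace_e, trace_f])
            >>> qudit = device.qachannels[0].readout.multistate.qudits[0]
            >>> qudit.configure(settings)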
"""
return self._tk_object.configure(qudit_settings=qudit_settings, enable=enable)
class MultiState(ZINode):
"""MultiState node.
Implements basic functionality of the MultiState node.
Args:
root: Root of the nodetree.
tree: Tree (node path as tuple) of the current node.
serial: Serial of the device.
index: Index of the corresponding readout channel.
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "multistate", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
if self._tk_object.qudits:
channel_list = ZIChannelList(
self,
"qudits",
Qudit,
zi_node=self._tk_object.qudits.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.qudits):
channel_list.append(
Qudit(
self,
x,
i,
zi_node=self._tk_object.qudits[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("qudits", channel_list)
def get_qudits_results(self) -> Dict[int, np.ndarray]:
"""Downloads the qudit results from the device and group them by qudit.
This function accesses the multistate nodes to determine which
        integrators were used for which qudit, to be able to group the results by
qudit.
Returns:
A dictionary with the qudit index keys and result vector values.
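        Example (a minimal sketch):
            >>> multistate = device.qachannels[0].readout.multistate
            >>> results = multistate.get_qudits_results()
            >>> results[0]  # result vector of qudit 0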
"""
return self._tk_object.get_qudits_results()
class Readout(ZINode):
"""Readout node.
Implements basic functionality of the readout, e.g allowing the user to
write the integration weight.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
serial: Serial of the device.
index: Index of the corresponding awg channel
max_qubits_per_channel: Max qubits per channel
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "readout", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
if self._tk_object.multistate:
self.add_submodule(
"multistate",
MultiState(
self,
self._tk_object.multistate,
zi_node=self._tk_object.multistate.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def configure_result_logger(
self,
*,
result_source: str,
result_length: int,
num_averages: int = 1,
averaging_mode: AveragingMode = AveragingMode.CYCLIC,
) -> None:
"""Configures the result logger for readout mode.
Args:
result_source: String-based tag to select the result source in readout
mode, e.g. "result_of_integration" or "result_of_discrimination".
result_length: Number of results to be returned by the result logger
num_averages: Number of averages, will be rounded to 2^n
averaging_mode: Select the averaging order of the result, with
0 = cyclic and 1 = sequential.
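        Example (a minimal sketch; result length and averages are
        placeholders):
            >>> device.qachannels[0].readout.configure_result_logger(
            ...     result_source="result_of_integration",
            ...     result_length=100,
            ...     num_averages=2,
            ... )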
"""
return self._tk_object.configure_result_logger(
result_source=result_source,
result_length=result_length,
num_averages=num_averages,
averaging_mode=averaging_mode,
)
def run(self) -> None:
"""Reset and enable the result logger."""
return self._tk_object.run()
def stop(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Stop the result logger.
Args:
timeout: The maximum waiting time in seconds for the Readout
(default: 10).
sleep_time: Sleep interval in seconds. (default = 0.05)
Raises:
            TimeoutError: The result logger could not be stopped within the
given time.
"""
return self._tk_object.stop(timeout=timeout, sleep_time=sleep_time)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Wait until the readout is finished.
Args:
timeout: The maximum waiting time in seconds for the Readout
(default: 10).
sleep_time: Sleep interval in seconds. (default = 0.05)
Raises:
TimeoutError: if the readout recording is not completed within the
given time.
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
    def read(self, *, timeout: float = 10) -> np.ndarray:
"""Waits until the logger finished recording and returns the measured data.
Args:
timeout: Maximum time to wait for data in seconds (default = 10s)
Returns:
Result logger data.
"""
return self._tk_object.read(timeout=timeout)
def write_integration_weights(
self,
weights: Union[Waveforms, dict],
*,
integration_delay: float = 0.0,
clear_existing: bool = True,
) -> None:
"""Configures the weighted integration.
Args:
weights: Dictionary containing the complex weight vectors, where
keys correspond to the indices of the integration units to be
configured.
integration_delay: Delay in seconds before starting the readout.
(default = 0.0)
clear_existing: Flag whether to clear the waveform memory before
the present upload. (default = True)
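        Example (a minimal sketch; the weight vector is a placeholder):
            >>> import numpy as np
            >>> from zhinst.toolkit import Waveforms
            >>> weights = Waveforms()
            >>> weights[0] = np.exp(1j * np.linspace(0, 2 * np.pi, 4096))
            >>> device.qachannels[0].readout.write_integration_weights(weights)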
"""
return self._tk_object.write_integration_weights(
weights=weights,
integration_delay=integration_delay,
clear_existing=clear_existing,
)
def read_integration_weights(self, slots: List[int] = None) -> Waveforms:
"""Read integration weights from the waveform memory.
Args:
slots: List of weight slots to read from the device. If not specified
all available weights will be downloaded.
Returns:
Mutable mapping of the downloaded weights.
"""
return self._tk_object.read_integration_weights(slots=slots)
class Spectroscopy(ZINode):
"""Spectroscopy node.
Implements basic functionality of the spectroscopy, e.g allowing the user to
read the result logger data.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
serial: Serial of the device.
index: Index of the corresponding awg channel
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "spectroscopy", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
def configure_result_logger(
self,
*,
result_length: int,
num_averages: int = 1,
averaging_mode: AveragingMode = AveragingMode.CYCLIC,
) -> None:
"""Configures the result logger for spectroscopy mode.
Args:
result_length: Number of results to be returned by the result logger
num_averages: Number of averages, will be rounded to 2^n.
averaging_mode: Averaging order of the result.
"""
return self._tk_object.configure_result_logger(
result_length=result_length,
num_averages=num_averages,
averaging_mode=averaging_mode,
)
def run(self) -> None:
"""Resets and enables the spectroscopy result logger."""
return self._tk_object.run()
def stop(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Stop the result logger.
Args:
timeout: The maximum waiting time in seconds for the
Spectroscopy (default: 10).
sleep_time: Time in seconds to wait between
requesting Spectroscopy state
Raises:
            TimeoutError: If the result logger could not be stopped within the
given time.
"""
return self._tk_object.stop(timeout=timeout, sleep_time=sleep_time)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Wait until spectroscopy is finished.
Args:
timeout (float): The maximum waiting time in seconds for the
Spectroscopy (default: 10).
sleep_time (float): Time in seconds to wait between
requesting Spectroscopy state
Raises:
TimeoutError: if the spectroscopy recording is not completed within the
given time.
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
    def read(self, *, timeout: float = 10) -> np.ndarray:
"""Waits until the logger finished recording and returns the measured data.
Args:
timeout: Maximum time to wait for data in seconds (default = 10s)
Returns:
An array containing the result logger data.
"""
return self._tk_object.read(timeout=timeout)
class QAChannel(ZINode):
"""Quantum Analyzer Channel for the SHFQA.
:class:`QAChannel` implements basic functionality to configure QAChannel
settings of the :class:`SHFQA` instrument.
Besides the :class:`Generator`, :class:`Readout` and :class:`Sweeper`
modules it also provides an easy access to commonly used `QAChannel` parameters.
Args:
device: SHFQA device object.
session: Underlying session.
tree: Node tree (node path as tuple) of the corresponding node.
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self,
parent,
f"qachannel_{index}",
snapshot_cache=snapshot_cache,
zi_node=zi_node,
)
self._tk_object = tk_object
if self._tk_object.generator:
self.add_submodule(
"generator",
Generator(
self,
self._tk_object.generator,
zi_node=self._tk_object.generator.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
if self._tk_object.readout:
self.add_submodule(
"readout",
Readout(
self,
self._tk_object.readout,
zi_node=self._tk_object.readout.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
if self._tk_object.spectroscopy:
self.add_submodule(
"spectroscopy",
Spectroscopy(
self,
self._tk_object.spectroscopy,
zi_node=self._tk_object.spectroscopy.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def configure_channel(
self,
*,
input_range: int,
output_range: int,
center_frequency: float,
mode: SHFQAChannelMode,
) -> None:
"""Configures the RF input and output of a specified channel.
Args:
input_range: Maximal range of the signal input power in dBm
output_range: Maximal range of the signal output power in dBm
center_frequency: Center frequency of the analysis band [Hz]
mode: Select between spectroscopy and readout mode.
"""
return self._tk_object.configure_channel(
input_range=input_range,
output_range=output_range,
center_frequency=center_frequency,
mode=mode,
)
class SHFScope(ZINode):
"""SHFQA Scope Node.
Implements basic functionality of the scope node, e.g allowing the user to
read the data.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
daq_server: Instance of the ziDAQServer
serial: Serial of the device.
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self,
parent,
f"shfscope_{index}",
snapshot_cache=snapshot_cache,
zi_node=zi_node,
)
self._tk_object = tk_object
def run(
self, *, single: bool = True, timeout: float = 10, sleep_time: float = 0.005
) -> None:
"""Run the scope recording.
        Args:
            single: Flag if the scope should run in single mode, i.e. stop
                after the first acquisition (default = True).
            timeout: The maximum waiting time in seconds for the Scope
                (default = 10).
sleep_time: Time in seconds to wait between requesting the progress
and records values (default = 0.005).
Raises:
TimeoutError: The scope did not start within the specified
timeout.
"""
return self._tk_object.run(
single=single, timeout=timeout, sleep_time=sleep_time
)
def stop(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Stop the scope recording.
Args:
timeout: The maximum waiting time in seconds for the scope
(default = 10).
sleep_time: Time in seconds to wait between requesting the progress
and records values (default = 0.005).
Raises:
TimeoutError: The scope did not stop within the specified
timeout.
"""
return self._tk_object.stop(timeout=timeout, sleep_time=sleep_time)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the scope recording is finished.
Args:
timeout: The maximum waiting time in seconds for the Scope
(default = 10).
sleep_time: Time in seconds to wait between requesting the progress
and records values (default = 0.005).
Raises:
TimeoutError: The scope did not finish within the specified
timeout.
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def configure(
self,
*,
input_select: Dict[int, str],
num_samples: int,
trigger_input: str,
num_segments: int = 1,
num_averages: int = 1,
trigger_delay: float = 0,
) -> None:
"""Configures the scope for a measurement.
Args:
            input_select: Map of scope channels to their signal
                sources, e.g. "channel0_signal_input". (For a list of
                available values use `available_inputs`)
            num_samples: Number of samples to record in a scope shot.
trigger_input: Specifies the trigger source of the scope
acquisition - if set to None, the self-triggering mode of the
scope becomes active, which is useful e.g. for the GUI.
For a list of available trigger values use
`available_trigger_inputs`.
num_segments: Number of distinct scope shots to be returned after
ending the acquisition.
num_averages: Specifies how many times each segment should be
averaged on hardware; to finish a scope acquisition, the number
of issued triggers must be equal to num_segments * num_averages.
trigger_delay: delay in samples specifying the time between the
start of data acquisition and reception of a trigger.
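        Example (a minimal sketch; the signal and trigger source names are
        placeholders, see `available_inputs` and `available_trigger_inputs`):
            >>> scope = device.scopes[0]
            >>> scope.configure(
            ...     input_select={0: "channel0_signal_input"},
            ...     num_samples=1024,
            ...     trigger_input="channel0_trigger_input0",
            ...     num_segments=1,
            ...     num_averages=1,
            ... )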
"""
return self._tk_object.configure(
input_select=input_select,
num_samples=num_samples,
trigger_input=trigger_input,
num_segments=num_segments,
num_averages=num_averages,
trigger_delay=trigger_delay,
)
def read(self, *, timeout: float = 10) -> tuple:
"""Read out the recorded data from the scope.
Args:
timeout: The maximum waiting time in seconds for the
Scope (default: 10).
Returns:
(recorded_data, recorded_data_range, scope_time)
Raises:
TimeoutError: if the scope recording is not completed before
timeout.
"""
return self._tk_object.read(timeout=timeout)
@property
def available_trigger_inputs(self) -> List:
"""List of the available trigger sources for the scope."""
return self._tk_object.available_trigger_inputs
@property
def available_inputs(self) -> List:
"""List of the available signal sources for the scope channels."""
return self._tk_object.available_inputs
class SHFQC(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments SHFQC."""
def _init_additional_nodes(self):
"""Init class specific modules and parameters."""
if self._tk_object.sgchannels:
channel_list = ZIChannelList(
self,
"sgchannels",
SGChannel,
zi_node=self._tk_object.sgchannels.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.sgchannels):
channel_list.append(
SGChannel(
self,
x,
i,
zi_node=self._tk_object.sgchannels[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("sgchannels", channel_list)
if self._tk_object.qachannels:
channel_list = ZIChannelList(
self,
"qachannels",
QAChannel,
zi_node=self._tk_object.qachannels.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.qachannels):
channel_list.append(
QAChannel(
self,
x,
i,
zi_node=self._tk_object.qachannels[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("qachannels", channel_list)
if self._tk_object.scopes:
channel_list = ZIChannelList(
self,
"scopes",
SHFScope,
zi_node=self._tk_object.scopes.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.scopes):
channel_list.append(
SHFScope(
self,
x,
i,
zi_node=self._tk_object.scopes[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("scopes", channel_list)
def factory_reset(self, *, deep: bool = True) -> None:
"""Load the factory default settings.
Args:
deep: A flag that specifies if a synchronization
should be performed between the device and the data
server after loading the factory preset (default: True).
"""
return self._tk_object.factory_reset(deep=deep)
def start_continuous_sw_trigger(
self, *, num_triggers: int, wait_time: float
) -> None:
"""Issues a specified number of software triggers.
Issues a specified number of software triggers with a certain wait time
in between. The function guarantees reception and proper processing of
all triggers by the device, but the time between triggers is
non-deterministic by nature of software triggering. Only use this
function for prototyping and/or cases without strong timing requirements.
Args:
num_triggers: Number of triggers to be issued
wait_time: Time between triggers in seconds
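        Example (a minimal sketch; trigger count and wait time are
        placeholders):
            >>> device.start_continuous_sw_trigger(
            ...     num_triggers=10, wait_time=2e-3
            ... )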
"""
return self._tk_object.start_continuous_sw_trigger(
num_triggers=num_triggers, wait_time=wait_time
)
@property
def max_qubits_per_channel(self) -> int:
"""Maximum number of supported qubits per channel."""
return self._tk_object.max_qubits_per_channel | zhinst-qcodes | /zhinst_qcodes-0.5.2-py3-none-any.whl/zhinst/qcodes/driver/devices/shfqc.py | shfqc.py |
from typing import List, Union
from zhinst.qcodes.driver.devices.base import ZIBaseInstrument
class PQSC(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments PQSC."""
def _init_additional_nodes(self):
"""Init class specific modules and parameters."""
def arm(self, *, deep=True, repetitions: int = None, holdoff: float = None) -> None:
"""Prepare PQSC for triggering the instruments.
This method configures the execution engine of the PQSC and
clears the register bank. Optionally, the *number of triggers*
and *hold-off time* can be set when specified as keyword
arguments. If they are not specified, they are not changed.
Note that the PQSC is disabled at the end of the hold-off time
after sending out the last trigger. Therefore, the hold-off time
should be long enough such that the PQSC is still enabled when
the feedback arrives. Otherwise, the feedback cannot be processed.
Args:
deep: A flag that specifies if a synchronization
should be performed between the device and the data
server after stopping the PQSC and clearing the
register bank (default: True).
repetitions: If specified, the number of triggers sent
over ZSync ports will be set (default: None).
holdoff: If specified, the time between repeated
triggers sent over ZSync ports will be set. It has a
minimum value and a granularity of 100 ns
(default: None).
"""
return self._tk_object.arm(deep=deep, repetitions=repetitions, holdoff=holdoff)
def run(self, *, deep: bool = True) -> None:
"""Start sending out triggers.
This method activates the trigger generation to trigger all
connected instruments over ZSync ports.
Args:
deep: A flag that specifies if a synchronization
should be performed between the device and the data
server after enabling the PQSC (default: True).
"""
return self._tk_object.run(deep=deep)
def arm_and_run(self, *, repetitions: int = None, holdoff: float = None) -> None:
"""Arm the PQSC and start sending out triggers.
Simply combines the methods arm and run. A synchronization
is performed between the device and the data server after
arming and running the PQSC.
Args:
repetitions: If specified, the number of triggers sent
over ZSync ports will be set (default: None).
holdoff: If specified, the time between repeated
triggers sent over ZSync ports will be set. It has a
minimum value and a granularity of 100 ns
(default: None).
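        Example (a minimal sketch; repetition count and hold-off time are
        placeholders):
            >>> pqsc.arm_and_run(repetitions=100, holdoff=1e-3)
            >>> pqsc.wait_done()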
"""
return self._tk_object.arm_and_run(repetitions=repetitions, holdoff=holdoff)
def stop(self, *, deep: bool = True) -> None:
"""Stop the trigger generation.
Args:
deep: A flag that specifies if a synchronization
should be performed between the device and the data
server after disabling the PQSC (default: True).
"""
return self._tk_object.stop(deep=deep)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until trigger generation and feedback processing is done.
Args:
timeout: The maximum waiting time in seconds for the
PQSC (default: 10).
sleep_time: Time in seconds to wait between
requesting PQSC state
Raises:
TimeoutError: If the PQSC is not done sending out all
triggers and processing feedback before the timeout.
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def check_ref_clock(self, *, timeout: int = 30, sleep_time: int = 1) -> bool:
"""Check if reference clock is locked successfully.
Args:
timeout: Maximum time in seconds the program waits
(default: 30).
sleep_time: Time in seconds to wait between
                requesting the reference clock status (default: 1)
        Returns:
            Flag if the reference clock is locked.
        Raises:
TimeoutError: If the process of locking to the reference clock
exceeds the specified timeout.
"""
return self._tk_object.check_ref_clock(timeout=timeout, sleep_time=sleep_time)
def check_zsync_connection(
self,
ports: Union[List[int], int] = 0,
*,
timeout: int = 30,
sleep_time: int = 1,
) -> Union[List[bool], bool]:
"""Check if the ZSync connection on the given port(s) is established.
This function checks the current status of the instrument connected to
the given ports.
Args:
ports: The port numbers to check the ZSync connection for.
It can either be a single port number given as integer or a list
of several port numbers. (default: 0)
timeout: Maximum time in seconds the program waits (default: 30).
            sleep_time: Time in seconds to wait between requesting the
                ZSync connection status (default: 1)
        Returns:
            Flag (or list of flags, if a list of ports was given) whether
            the ZSync connection on each specified port is established.
        Raises:
TimeoutError: If the process of establishing a ZSync connection on
one of the specified ports exceeds the specified timeout.
"""
return self._tk_object.check_zsync_connection(
ports=ports, timeout=timeout, sleep_time=sleep_time
)
def find_zsync_worker_port(self, device: ZIBaseInstrument) -> int:
"""Find the ID of the PQSC ZSync port connected to a given device.
Args:
            device: The device for which the connected ZSync port shall be
                found.
Returns:
            Integer value representing the ID of the PQSC ZSync port
            connected to the given device.
Raises:
ToolkitError: If the given device doesn't appear to be connected
to the PQSC via ZSync.
.. versionadded:: 0.5.1
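        Example (a minimal sketch; ``hdawg`` is a placeholder for a device
        connected to the PQSC via ZSync):
            >>> port = pqsc.find_zsync_worker_port(hdawg)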
"""
return self._tk_object.find_zsync_worker_port(device=device._tk_object) | zhinst-qcodes | /zhinst_qcodes-0.5.2-py3-none-any.whl/zhinst/qcodes/driver/devices/pqsc.py | pqsc.py |
from typing import Any, Dict, List, Tuple, Union
from zhinst.toolkit import CommandTable, Waveforms, Sequence
from zhinst.qcodes.driver.devices.base import ZIBaseInstrument
from zhinst.qcodes.qcodes_adaptions import ZINode, ZIChannelList
class CommandTableNode(ZINode):
"""CommandTable node.
This class implements the basic functionality of the command table allowing
the user to load and upload their own command table.
A dedicated class called ``CommandTable`` exists that is the preferred way
to create a valid command table. For more information about the
``CommandTable`` refer to the corresponding example or the documentation
of that class directly.
Args:
root: Node used for the upload of the command table
tree: Tree (node path as tuple) of the current node
device_type: Device type.
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "commandtable", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
def check_status(self) -> bool:
"""Check status of the command table.
Returns:
Flag if a valid command table is loaded into the device.
Raises:
RuntimeError: If the command table upload into the device failed.
"""
return self._tk_object.check_status()
def load_validation_schema(self) -> Dict[str, Any]:
"""Load device command table validation schema.
Returns:
JSON validation schema for the device command tables.
"""
return self._tk_object.load_validation_schema()
def upload_to_device(
self,
ct: Union[CommandTable, str, dict],
*,
validate: bool = False,
check_upload: bool = True,
) -> None:
"""Upload command table into the device.
The command table can either be specified through the dedicated
``CommandTable`` class or in a raw format, meaning a json string or json
dict. In the case of a json string or dict the command table is
validated by default against the schema provided by the device.
Args:
ct: Command table.
validate: Flag if the command table should be validated. (Only
applies if the command table is passed as a raw json string or
json dict)
check_upload: Flag if the upload should be validated by calling
                `check_status`. This is not mandatory but strongly recommended
                since the device does not raise an error when it rejects the
                command table. This flag is ignored when called from within a
transaction.
Raises:
RuntimeError: If the command table upload into the device failed.
zhinst.toolkit.exceptions.ValidationError: Incorrect schema.
.. versionchanged:: 0.4.2
New Flag `check_upload` that makes the upload check optional.
            `check_status` is only called when not in an ongoing transaction.
"""
return self._tk_object.upload_to_device(
ct=ct, validate=validate, check_upload=check_upload
)
def load_from_device(self) -> CommandTable:
"""Load command table from the device.
Returns:
command table.
"""
return self._tk_object.load_from_device()
class AWG(ZINode):
"""AWG node.
This class implements the basic functionality for the device specific
arbitrary waveform generator.
Besides the upload/compilation of sequences it offers the upload of
waveforms and command tables.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
session: Underlying session.
serial: Serial of the device.
index: Index of the corresponding awg channel
device_type: Device type
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, f"awg_{index}", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
if self._tk_object.commandtable:
self.add_submodule(
"commandtable",
CommandTableNode(
self,
self._tk_object.commandtable,
zi_node=self._tk_object.commandtable.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def enable_sequencer(self, *, single: bool) -> None:
"""Starts the sequencer of a specific channel.
Warning:
This function is synchronous and blocks until the sequencer is enabled.
When working with multiple instruments this function is the wrong
approach and the sequencer should be enabled asynchronously.
(For more information please take a look at the awg example in the
toolkit documentation.)
Args:
single: Flag if the sequencer should be disabled after finishing
execution.
Raises:
RuntimeError: If the sequencer could not be enabled.
.. versionchanged:: 0.5.0
Check the acknowledged value instead of using `wait_for_state_change`.
"""
return self._tk_object.enable_sequencer(single=single)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the AWG is finished.
Args:
timeout: The maximum waiting time in seconds for the generator
(default: 10).
sleep_time: Time in seconds to wait between requesting generator
state
Raises:
RuntimeError: If continuous mode is enabled
TimeoutError: If the sequencer program did not finish within
the specified timeout time
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def compile_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Tuple[bytes, Dict[str, Any]]:
"""Compiles a sequencer program for the specific device.
Args:
sequencer_program: The sequencer program to compile.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
                the device. It is recommended to pass the samplerate if more
                than one sequencer program is uploaded in a row, to speed up
                execution.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Returns:
elf: Binary ELF data for sequencer.
extra: Extra dictionary with compiler output.
Examples:
>>> elf, compile_info = device.awgs[0].compile_sequencer_program(seqc)
>>> device.awgs[0].elf.data(elf)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the compilation failed.
.. versionadded:: 0.4.0
"""
return self._tk_object.compile_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def load_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Dict[str, Any]:
"""Compiles the given sequencer program on the AWG Core.
Warning:
            After uploading the sequencer program one needs to wait for
            the AWG core to become ready before it can be enabled.
            The AWG core indicates the ready state through its `ready` node.
            (device.awgs[0].ready() == True)
Args:
sequencer_program: Sequencer program to be uploaded.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
                the device. It is recommended to pass the samplerate if more
                than one sequencer program is uploaded in a row, to speed up
                execution.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Examples:
>>> compile_info = device.awgs[0].load_sequencer_program(seqc)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the upload or compilation failed.
.. versionadded:: 0.3.4
`sequencer_program` does not accept empty strings
.. versionadded:: 0.4.0
Use offline compiler instead of AWG module to compile the sequencer
            program. This speeds up the compilation and also enables parallel
compilation/upload.
"""
return self._tk_object.load_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def write_to_waveform_memory(
self, waveforms: Waveforms, indexes: list = None
) -> None:
"""Writes waveforms to the waveform memory.
The waveforms must already be assigned in the sequencer program.
Args:
waveforms: Waveforms that should be uploaded.
indexes: Specify a list of indexes that should be uploaded. If
nothing is specified all available indexes in waveforms will
be uploaded. (default = None)
.. versionchanged:: 0.4.2
Removed `validate` flag and functionality. The validation check is
now done in the `Waveforms.validate` function.
"""
return self._tk_object.write_to_waveform_memory(
waveforms=waveforms, indexes=indexes
)
def read_from_waveform_memory(self, indexes: List[int] = None) -> Waveforms:
"""Read waveforms from the waveform memory.
Args:
indexes: List of waveform indexes to read from the device. If not
specified all assigned waveforms will be downloaded.
Returns:
Waveform object with the downloaded waveforms.
"""
return self._tk_object.read_from_waveform_memory(indexes=indexes)
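

# A minimal usage sketch for the AWG node above (not part of the generated
# driver). It assumes a LabOne data server on "localhost" and an HDAWG with
# the placeholder serial "dev8000"; ``ZISession`` is the session entry point
# of this package.
def _example_awg_usage():
    from zhinst.qcodes import ZISession

    session = ZISession("localhost")
    device = session.connect_device("dev8000")  # placeholder serial
    awg = device.awgs[0]
    seqc = "wave w = placeholder(1008);\nassignWaveIndex(w, 0);\nplayWave(w);"
    # Offline compilation followed by a manual ELF upload ...
    elf, _ = awg.compile_sequencer_program(seqc)
    awg.elf.data(elf)
    awg.ready.wait_for_state_change(1)
    # ... or compilation and upload in a single call.
    awg.load_sequencer_program(seqc)
    awg.enable_sequencer(single=True)
    awg.wait_done()
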
class HDAWG(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments HDAWG."""
def _init_additional_nodes(self):
"""Init class specific modules and parameters."""
if self._tk_object.awgs:
channel_list = ZIChannelList(
self,
"awgs",
AWG,
zi_node=self._tk_object.awgs.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.awgs):
channel_list.append(
AWG(
self,
x,
i,
zi_node=self._tk_object.awgs[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("awgs", channel_list)
def enable_qccs_mode(self) -> None:
"""Configure the instrument to work with PQSC.
This method sets the reference clock source to
connect the instrument to the PQSC.
Info:
Use ``factory_reset`` to reset the changes if necessary
"""
        return self._tk_object.enable_qccs_mode()


# --- end of file: zhinst-qcodes / zhinst/qcodes/driver/devices/hdawg.py ---
from typing import Union, Optional, List, Dict, Any, Tuple
import numpy as np
from zhinst.toolkit import CommandTable, Waveforms, Sequence
from zhinst.qcodes.driver.devices.base import ZIBaseInstrument
from zhinst.qcodes.qcodes_adaptions import ZINode, ZIChannelList
class CommandTableNode(ZINode):
"""CommandTable node.
This class implements the basic functionality of the command table allowing
the user to load and upload their own command table.
A dedicated class called ``CommandTable`` exists that is the preferred way
to create a valid command table. For more information about the
``CommandTable`` refer to the corresponding example or the documentation
of that class directly.
Args:
root: Node used for the upload of the command table
tree: Tree (node path as tuple) of the current node
device_type: Device type.
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "commandtable", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
def check_status(self) -> bool:
"""Check status of the command table.
Returns:
Flag if a valid command table is loaded into the device.
Raises:
RuntimeError: If the command table upload into the device failed.
"""
return self._tk_object.check_status()
def load_validation_schema(self) -> Dict[str, Any]:
"""Load device command table validation schema.
Returns:
JSON validation schema for the device command tables.
"""
return self._tk_object.load_validation_schema()
def upload_to_device(
self,
ct: Union[CommandTable, str, dict],
*,
validate: bool = False,
check_upload: bool = True,
) -> None:
"""Upload command table into the device.
The command table can either be specified through the dedicated
``CommandTable`` class or in a raw format, meaning a json string or json
dict. In the case of a json string or dict the command table is
validated by default against the schema provided by the device.
Args:
ct: Command table.
validate: Flag if the command table should be validated. (Only
applies if the command table is passed as a raw json string or
json dict)
check_upload: Flag if the upload should be validated by calling
                `check_status`. This is not mandatory but strongly recommended
                since the device does not raise an error when it rejects the
                command table. This flag is ignored when called from within a
transaction.
Raises:
RuntimeError: If the command table upload into the device failed.
zhinst.toolkit.exceptions.ValidationError: Incorrect schema.
.. versionchanged:: 0.4.2
New Flag `check_upload` that makes the upload check optional.
            `check_status` is only called when not in an ongoing transaction.
"""
return self._tk_object.upload_to_device(
ct=ct, validate=validate, check_upload=check_upload
)
def load_from_device(self) -> CommandTable:
"""Load command table from the device.
Returns:
command table.
"""
return self._tk_object.load_from_device()
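

# Sketch of a command table upload through the CommandTableNode above (not
# part of the generated driver). ``device`` stands for a connected driver
# whose AWG core exposes a command table (e.g. an HDAWG); the exact table
# fields depend on the device schema.
def _example_commandtable_upload(device):
    ct_node = device.awgs[0].commandtable
    ct = CommandTable(ct_node.load_validation_schema())
    ct.table[0].waveform.index = 0
    ct.table[0].amplitude0.value = 0.5
    ct_node.upload_to_device(ct)
    assert ct_node.check_status()
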
class AWG(ZINode):
"""AWG node.
This class implements the basic functionality for the device specific
arbitrary waveform generator.
Besides the upload/compilation of sequences it offers the upload of
waveforms and command tables.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
session: Underlying session.
serial: Serial of the device.
index: Index of the corresponding awg channel
device_type: Device type
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, f"awg_{index}", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
if self._tk_object.commandtable:
self.add_submodule(
"commandtable",
CommandTableNode(
self,
self._tk_object.commandtable,
zi_node=self._tk_object.commandtable.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def enable_sequencer(self, *, single: bool) -> None:
"""Starts the sequencer of a specific channel.
Warning:
This function is synchronous and blocks until the sequencer is enabled.
When working with multiple instruments this function is the wrong
approach and the sequencer should be enabled asynchronously.
(For more information please take a look at the awg example in the
toolkit documentation.)
Args:
single: Flag if the sequencer should be disabled after finishing
execution.
Raises:
RuntimeError: If the sequencer could not be enabled.
.. versionchanged:: 0.5.0
Check the acknowledged value instead of using `wait_for_state_change`.
"""
return self._tk_object.enable_sequencer(single=single)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the AWG is finished.
Args:
timeout: The maximum waiting time in seconds for the generator
(default: 10).
sleep_time: Time in seconds to wait between requesting generator
state
Raises:
RuntimeError: If continuous mode is enabled
TimeoutError: If the sequencer program did not finish within
the specified timeout time
"""
return self._tk_object.wait_done(timeout=timeout, sleep_time=sleep_time)
def compile_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Tuple[bytes, Dict[str, Any]]:
"""Compiles a sequencer program for the specific device.
Args:
sequencer_program: The sequencer program to compile.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
                the device. It is recommended to pass the samplerate if more
than one sequencer code is uploaded in a row to speed up the
execution time.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Returns:
elf: Binary ELF data for sequencer.
extra: Extra dictionary with compiler output.
Examples:
>>> elf, compile_info = device.awgs[0].compile_sequencer_program(seqc)
>>> device.awgs[0].elf.data(elf)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the compilation failed.
.. versionadded:: 0.4.0
"""
return self._tk_object.compile_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def load_sequencer_program(
self, sequencer_program: Union[str, Sequence], **kwargs: Union[str, int]
) -> Dict[str, Any]:
"""Compiles the given sequencer program on the AWG Core.
Warning:
            After uploading the sequencer program one needs to wait for the
            awg core to become ready before it can be enabled.
The awg core indicates the ready state through its `ready` node.
(device.awgs[0].ready() == True)
Args:
sequencer_program: Sequencer program to be uploaded.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
                the device. It is recommended to pass the samplerate if more
than one sequencer code is uploaded in a row to speed up the
execution time.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Examples:
>>> compile_info = device.awgs[0].load_sequencer_program(seqc)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the upload or compilation failed.
.. versionadded:: 0.3.4
`sequencer_program` does not accept empty strings
.. versionadded:: 0.4.0
Use offline compiler instead of AWG module to compile the sequencer
            program. This speeds up the compilation and also enables parallel
compilation/upload.
"""
return self._tk_object.load_sequencer_program(
sequencer_program=sequencer_program, **kwargs
)
def write_to_waveform_memory(
self, waveforms: Waveforms, indexes: list = None
) -> None:
"""Writes waveforms to the waveform memory.
The waveforms must already be assigned in the sequencer program.
Args:
waveforms: Waveforms that should be uploaded.
indexes: Specify a list of indexes that should be uploaded. If
nothing is specified all available indexes in waveforms will
be uploaded. (default = None)
.. versionchanged:: 0.4.2
Removed `validate` flag and functionality. The validation check is
now done in the `Waveforms.validate` function.
"""
return self._tk_object.write_to_waveform_memory(
waveforms=waveforms, indexes=indexes
)
def read_from_waveform_memory(self, indexes: List[int] = None) -> Waveforms:
"""Read waveforms from the waveform memory.
Args:
indexes: List of waveform indexes to read from the device. If not
specified all assigned waveforms will be downloaded.
Returns:
Waveform object with the downloaded waveforms.
"""
return self._tk_object.read_from_waveform_memory(indexes=indexes)
class Integration(ZINode):
"""Integration part for the UHFQA.
Args:
root: Underlying node tree.
tree: tree (node path as tuple) of the corresponding node.
.. versionadded:: 0.3.2
"""
def __init__(self, parent, tk_object, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, "integration", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
def write_integration_weights(self, weights: Union[Waveforms, dict]) -> None:
"""Upload complex integration weights.
The weight functions are applied to the real and imaginary part of
the input signal. In the hardware the weights are implemented
as 17-bit integers.
Args:
weights: Dictionary containing the weight functions, where
keys correspond to the indices of the integration weights to be
configured.
Note:
Does not raise an error when sample limit is exceeded, but applies only
the maximum number of samples. Please refer to LabOne node documentation
for the number of maximum integration weight samples.
Note:
This function calls both `/qas/n/integration/weights/n/real` and
`/qas/n/integration/weights/n/imag` nodes.
            If only the real or imaginary part is defined, the same number of
            samples in the other part is zeroed.
"""
return self._tk_object.write_integration_weights(weights=weights)
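

# Sketch for uploading complex integration weights through the Integration
# node above (not part of the generated driver). ``device`` is assumed to be
# a connected UHFQA driver; the weight length of 4096 samples is only
# illustrative.
def _example_integration_weights(device):
    weights = Waveforms()
    weights[0] = np.exp(2j * np.pi * np.linspace(0, 1, 4096))  # slot 0
    device.qas[0].integration.write_integration_weights(weights)
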
class QAS(ZINode):
"""Quantum Analyzer Channel for the UHFQA.
Args:
root: Underlying node tree.
tree: tree (node path as tuple) of the corresponding node.
"""
def __init__(self, parent, tk_object, index, snapshot_cache=None, zi_node=None):
ZINode.__init__(
self, parent, f"qas_{index}", snapshot_cache=snapshot_cache, zi_node=zi_node
)
self._tk_object = tk_object
if self._tk_object.integration:
self.add_submodule(
"integration",
Integration(
self,
self._tk_object.integration,
zi_node=self._tk_object.integration.node_info.path,
snapshot_cache=self._snapshot_cache,
),
)
def crosstalk_matrix(self, matrix: np.ndarray = None) -> Optional[np.ndarray]:
"""Sets or gets the crosstalk matrix of the UHFQA as a 2D array.
Args:
matrix: The 2D matrix used in the digital signal
processing path to compensate for crosstalk between the
different channels. The given matrix can also be a part
of the entire 10 x 10 matrix. Its maximum dimensions
are 10 x 10 (default: None).
Returns:
If no argument is given the method returns the current
crosstalk matrix as a 2D numpy array.
Raises:
ValueError: If the matrix size exceeds the maximum size of
10 x 10
"""
return self._tk_object.crosstalk_matrix(matrix=matrix)
def adjusted_delay(self, value: int = None) -> int:
"""Set or get the adjustment in the quantum analyzer delay.
Adjusts the delay that defines the time at which the integration starts
in relation to the trigger signal of the weighted integration units.
        Depending on whether the deskew matrix is bypassed, a different
        default delay applies. This function can be used to add an additional
        delay to the default delay.
Args:
value: Number of additional samples to adjust the delay. If not
specified this function will just return the additional delay
currently set.
Returns:
The adjustment in delay in units of samples.
Raises:
ValueError: If the adjusted quantum analyzer delay is outside the
allowed range of 1021 samples.
"""
return self._tk_object.adjusted_delay(value=value)
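

# Sketch of the QAS helper methods above (not part of the generated driver).
# ``device`` stands for a connected UHFQA driver; the 4 x 4 identity matrix
# is a partial crosstalk matrix and 16 samples are added to the default
# integration delay.
def _example_qas_settings(device):
    device.qas[0].crosstalk_matrix(np.eye(4))
    device.qas[0].adjusted_delay(16)
    return device.qas[0].crosstalk_matrix()  # read back as 2D numpy array
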
class UHFQA(ZIBaseInstrument):
"""QCoDeS driver for the Zurich Instruments UHFQA."""
def _init_additional_nodes(self):
"""Init class specific modules and parameters."""
if self._tk_object.awgs:
channel_list = ZIChannelList(
self,
"awgs",
AWG,
zi_node=self._tk_object.awgs.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.awgs):
channel_list.append(
AWG(
self,
x,
i,
zi_node=self._tk_object.awgs[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("awgs", channel_list)
if self._tk_object.qas:
channel_list = ZIChannelList(
self,
"qas",
QAS,
zi_node=self._tk_object.qas.node_info.path,
snapshot_cache=self._snapshot_cache,
)
for i, x in enumerate(self._tk_object.qas):
channel_list.append(
QAS(
self,
x,
i,
zi_node=self._tk_object.qas[i].node_info.path,
snapshot_cache=self._snapshot_cache,
)
)
# channel_list.lock()
self.add_submodule("qas", channel_list)
def enable_qccs_mode(self) -> None:
"""Configure the instrument to work with PQSC.
This method sets the reference clock source and DIO settings
correctly to connect the instrument to the PQSC.
Info:
Use ``factory_reset`` to reset the changes if necessary
"""
        return self._tk_object.enable_qccs_mode()


# --- end of file: zhinst-qcodes / zhinst/qcodes/driver/devices/uhfqa.py ---
import json
import typing as t
from collections.abc import MutableMapping
from enum import IntFlag
from pathlib import Path
from contextlib import contextmanager
import zhinst.toolkit.driver.devices as tk_devices
import zhinst.toolkit.driver.modules as tk_modules
from zhinst.toolkit.exceptions import ToolkitError
from zhinst import core
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.helper import lazy_property, NodeDict
from zhinst.toolkit.nodetree.nodetree import Transaction
class Devices(MutableMapping):
"""Mapping class for the connected devices.
Maps the connected devices from data server to lazy device objects.
On every access the connected devices are read from the data server. This
ensures that even if devices get connected/disconnected through another
session the list will be up to date.
Args:
session: An active session to the data server.
"""
def __init__(self, session: "Session"):
self._session = session
self._devices: t.Dict[str, tk_devices.DeviceType] = {}
self._device_classes = tk_devices.DEVICE_CLASS_BY_MODEL
def __getitem__(self, key) -> tk_devices.DeviceType:
key = key.lower()
if key in self.connected():
if key not in self._devices:
self._devices[key] = self._create_device(key)
            # start a transaction if the session has an ongoing one
if self._session.multi_transaction.in_progress():
self._devices[key].root.transaction.start(
self._session.multi_transaction.add
)
return self._devices[key]
self._devices.pop(key, None)
raise KeyError(key)
def __setitem__(self, *_):
raise LookupError(
"Illegal operation. Can not add a device manually. Devices must be "
"connected through the session (session.connect_device)."
)
def __delitem__(self, key):
self._devices.pop(key, None)
def __iter__(self):
return iter(self.connected())
def __len__(self):
return len(self.connected())
def _create_device(self, serial: str) -> tk_devices.DeviceType:
"""Creates a new device object.
Maps the device type to the correct instrument class (The default is
the ``BaseInstrument`` which is a generic instrument class that supports
all devices).
Warning:
The device must be connected to the data server.
Args:
serial: Device serial
Returns:
Newly created instrument object
Raises:
RuntimeError: If the device is not connected to the data server
"""
dev_type = self._session.daq_server.getString(f"/{serial}/features/devtype")
return self._device_classes.get(dev_type, tk_devices.BaseInstrument)(
serial, dev_type, self._session
)
def connected(self) -> t.List[str]:
"""Get a list of devices connected to the data server.
Returns:
List of all connected devices.
"""
return (
self._session.daq_server.getString("/zi/devices/connected")
.lower()
.split(",")
)
def visible(self) -> t.List[str]:
"""Get a list of devices visible to the data server.
Returns:
            List of all visible devices.
"""
return (
self._session.daq_server.getString("/zi/devices/visible").lower().split(",")
)
def created_devices(self) -> t.ValuesView[tk_devices.DeviceType]:
"""View on all created device.
The list contains all toolkit device objects that have been created for
the underlying session.
Warning: This is not equal to the devices connected to the data server!
Use the iterator of the `Devices` class directly to get all devices
connected to the data server.
"""
return self._devices.values()
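

# Sketch of how the Devices mapping above is used through a Session (defined
# later in this module); "dev1234" is a placeholder serial.
def _example_devices_mapping(session: "Session") -> None:
    print(session.devices.visible())    # everything the data server can see
    print(session.devices.connected())  # currently connected devices
    print(session.devices["dev1234"])   # lazily creates the device object
    for serial in session.devices:      # iterates over connected serials
        print(serial)
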
class HF2Devices(Devices):
"""Mapping class for the connected HF2 devices.
Maps the connected devices from data server to lazy device objects.
It derives from the general ``Devices`` class and adds the special handling
for the HF2 data server. Since the HF2 Data Server is based on the API Level
    1 it has a much more restricted API. This means it is not possible to get
the connected or visible devices from the data server. This class must
track the connected devices itself and use discovery to mimic the
behavior of the new data server used for the other devices.
"""
def _create_device(self, serial: str) -> tk_devices.BaseInstrument:
"""Creates a new device object.
Maps the device type to the correct instrument class (The default is
the ``BaseInstrument`` which is a generic instrument class that supports
all devices).
Warning:
The device must already be connected to the data server
Args:
serial: Device serial
Returns:
Newly created instrument object
Raises:
RuntimeError: If the device is not connected to the data server
ToolkitError: DataServer is HF2, but the device is not.
"""
try:
return super()._create_device(serial)
except RuntimeError as error:
if "ZIAPINotFoundException" in error.args[0]:
discovery = core.ziDiscovery()
discovery.find(serial)
dev_type = discovery.get(serial)["devicetype"]
raise ToolkitError(
"Can only connect HF2 devices to an HF2 data "
f"server. {serial} identifies itself as a {dev_type}."
) from error
raise
def connected(self) -> t.List[str]:
"""Get a list of devices connected to the data server.
Returns:
List of all connected devices.
"""
return list(self._devices.keys())
def visible(self) -> t.List[str]:
"""Get a list of devices visible to the data server.
Returns:
            List of all visible devices.
"""
return core.ziDiscovery().findAll()
def add_hf2_device(self, serial: str) -> None:
"""Add a new HF2 device.
Since the HF2 data server is not able to report its connected devices
        toolkit needs to update the list of known connected devices manually.
Args:
serial: Serial of the HF2 device
Raises:
ToolkitError: If the device was already added in that session.
"""
if serial in self._devices:
raise ToolkitError(f"Can only create one instance of {serial}.")
self._devices[serial] = self._create_device(serial)
class ModuleHandler:
"""Modules of LabOne.
    Handler for all additional so-called LabOne modules. A LabOne module is
    bound to a user session but creates an independent session to the Data
    Server. This has the advantage that it does not interfere with the user
    session. It also means that creating a module causes additional resource
    allocation, both at the client and the data server. New modules should
    therefore only be instantiated with care.
    Toolkit holds a lazily generated instance of each module. This ensures
    that no more than one module of each type gets created by accident and
    that access to the modules is optimized.
Of course there are many use cases where more than one module of a single
type is required. This class therefore also exposes a ``create`` function for
    each LabOne module. These functions create an unmanaged instance of that
module (unmanaged means toolkit does not hold an instance of that module).
Args:
        session: Active user session.
"""
def __init__(self, session: "Session"):
self._session = session
def __repr__(self):
return str(
"LabOneModules("
f"{self._session.daq_server.host}:{self._session.daq_server.port})"
)
def create_awg_module(self) -> tk_modules.BaseModule:
"""Create an instance of the AwgModule.
The resulting Module will have the nodetree accessible. The underlying
zhinst.core Module can be accessed through the `raw_module`
property.
The new instance establishes a new session to the DataServer.
        New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `awg`.
Returns:
Created module
"""
return tk_modules.BaseModule(
self._session.daq_server.awgModule(), self._session
)
def create_daq_module(self) -> tk_modules.DAQModule:
"""Create an instance of the DataAcquisitionModule.
The resulting Module will have the nodetree accessible. The underlying
zhinst.core Module can be accessed through the `raw_module`
property.
The new instance establishes a new session to the DataServer.
        New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
by the property `daq`.
Returns:
Created module
"""
return tk_modules.DAQModule(
self._session.daq_server.dataAcquisitionModule(), self._session
)
def create_device_settings_module(self) -> tk_modules.DeviceSettingsModule:
"""Create an instance of the DeviceSettingsModule.
The resulting Module will have the nodetree accessible. The underlying
zhinst.core Module can be accessed through the `raw_module`
property.
The new instance establishes a new session to the DataServer.
        New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
        by the property `device_settings`.
Returns:
Created module
"""
return tk_modules.DeviceSettingsModule(
self._session.daq_server.deviceSettings(), self._session
)
def create_impedance_module(self) -> tk_modules.ImpedanceModule:
"""Create an instance of the ImpedanceModule.
The resulting Module will have the nodetree accessible. The underlying
zhinst.core Module can be accessed through the `raw_module`
property.
The new instance establishes a new session to the DataServer.
        New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
        by the property `impedance`.
Returns:
Created module
"""
return tk_modules.ImpedanceModule(
self._session.daq_server.impedanceModule(), self._session
)
def create_mds_module(self) -> tk_modules.BaseModule:
"""Create an instance of the MultiDeviceSyncModule.
The resulting Module will have the nodetree accessible. The underlying
zhinst.core Module can be accessed through the `raw_module`
property.
The new instance establishes a new session to the DataServer.
        New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
        by the property `mds`.
Returns:
Created module
"""
return tk_modules.BaseModule(
self._session.daq_server.multiDeviceSyncModule(), self._session
)
def create_pid_advisor_module(self) -> tk_modules.PIDAdvisorModule:
"""Create an instance of the PidAdvisorModule.
The resulting Module will have the nodetree accessible. The underlying
zhinst.core Module can be accessed through the `raw_module`
property.
The new instance establishes a new session to the DataServer.
        New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
        by the property `pid_advisor`.
Returns:
Created module
"""
return tk_modules.PIDAdvisorModule(
self._session.daq_server.pidAdvisor(), self._session
)
def create_precompensation_advisor_module(
self,
) -> tk_modules.PrecompensationAdvisorModule:
"""Create an instance of the PrecompensationAdvisorModule.
In contrast to core.ziDAQServer.precompensationAdvisor() a nodetree property
is added.
The new instance establishes a new session to the DataServer.
        New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
        by the property `precompensation_advisor`.
Returns:
Created module
"""
return tk_modules.PrecompensationAdvisorModule(
self._session.daq_server.precompensationAdvisor(), self._session
)
def create_qa_module(self) -> tk_modules.BaseModule:
"""Create an instance of the QuantumAnalyzerModule.
The resulting Module will have the nodetree accessible. The underlying
zhinst.core Module can be accessed through the `raw_module`
property.
The new instance establishes a new session to the DataServer.
        New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
        by the property `qa`.
Returns:
Created module
"""
return tk_modules.BaseModule(
self._session.daq_server.quantumAnalyzerModule(), self._session
)
def create_scope_module(self) -> tk_modules.ScopeModule:
"""Create an instance of the ScopeModule.
The resulting Module will have the nodetree accessible. The underlying
zhinst.core Module can be accessed through the `raw_module`
property.
The new instance establishes a new session to the DataServer.
        New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
        by the property `scope`.
Returns:
Created module
"""
return tk_modules.ScopeModule(
self._session.daq_server.scopeModule(), self._session
)
def create_sweeper_module(self) -> tk_modules.SweeperModule:
"""Create an instance of the SweeperModule.
The resulting Module will have the nodetree accessible. The underlying
zhinst.core Module can be accessed through the `raw_module`
property.
The new instance establishes a new session to the DataServer.
        New instances should therefore be created carefully since they consume
resources.
The new module is not managed by toolkit. A managed instance is provided
        by the property `sweeper`.
Returns:
Created module
"""
return tk_modules.SweeperModule(self._session.daq_server.sweep(), self._session)
def create_shfqa_sweeper(self) -> tk_modules.SHFQASweeper:
"""Create an instance of the SHFQASweeper.
For now the general sweeper module does not support the SHFQA. However a
python based implementation called ``SHFSweeper`` does already provide
this functionality. The ``SHFSweeper`` is part of the ``zhinst`` module
and can be found in the utils.
        Toolkit wraps around the ``SHFSweeper`` and exposes an interface that is
similar to the LabOne modules, meaning the parameters are exposed in a
node tree like structure.
In addition a new session is created. This has the benefit that the
        sweeper implementation does not interfere with the commands and
setups from the user.
Returns:
Created object
"""
return tk_modules.SHFQASweeper(self._session)
@lazy_property
def awg(self) -> tk_modules.BaseModule:
"""Managed instance of the awg module.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_awg_module`` to create an unmanaged instance)
"""
return self.create_awg_module()
@lazy_property
def daq(self) -> tk_modules.DAQModule:
"""Managed instance of the daq module.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_daq_module`` to create an unmanaged instance)
"""
return self.create_daq_module()
@lazy_property
def device_settings(self) -> tk_modules.DeviceSettingsModule:
"""Managed instance of the device settings module.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_device_settings_module`` to create an
unmanaged instance)
"""
return self.create_device_settings_module()
@lazy_property
def impedance(self) -> tk_modules.ImpedanceModule:
"""Managed instance of the impedance module.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_impedance_module`` to create an unmanaged
        instance)
"""
return self.create_impedance_module()
@lazy_property
def mds(self) -> tk_modules.BaseModule:
"""Managed instance of the multi device sync module.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_mds_module`` to create an unmanaged instance)
"""
return self.create_mds_module()
@lazy_property
def pid_advisor(self) -> tk_modules.PIDAdvisorModule:
"""Managed instance of the pid advisor module.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_pid_advisor_module`` to create an unmanaged
instance)
"""
return self.create_pid_advisor_module()
@lazy_property
def precompensation_advisor(self) -> tk_modules.PrecompensationAdvisorModule:
"""Managed instance of the precompensation advisor module.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_precompensation_advisor_module`` to create an
unmanaged instance)
"""
return self.create_precompensation_advisor_module()
@lazy_property
def qa(self) -> tk_modules.BaseModule:
"""Managed instance of the quantum analyzer module.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_qa_module`` to create an unmanaged instance)
"""
return self.create_qa_module()
@lazy_property
def scope(self) -> tk_modules.ScopeModule:
"""Managed instance of the scope module.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_scope_module`` to create an unmanaged
instance)
"""
return self.create_scope_module()
@lazy_property
def sweeper(self) -> tk_modules.SweeperModule:
"""Managed instance of the sweeper module.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_sweeper_module`` to create an unmanaged instance)
"""
return self.create_sweeper_module()
@lazy_property
def shfqa_sweeper(self) -> tk_modules.SHFQASweeper:
"""Managed instance of the shfqa sweeper implementation.
Managed means that only one instance is created
and is held inside the connection Manager. This makes it easier to access
the modules from within toolkit, since creating a module requires
        resources. (use ``create_shfqa_sweeper`` to create an unmanaged
instance)
"""
return self.create_shfqa_sweeper()
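

# Sketch contrasting managed and unmanaged module access on the handler
# above; ``session`` stands for an active Session instance.
def _example_module_access(session: "Session") -> None:
    daq = session.modules.daq  # managed: created lazily, then reused
    assert daq is session.modules.daq
    extra_daq = session.modules.create_daq_module()  # unmanaged: new instance
    assert extra_daq is not daq
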
class PollFlags(IntFlag):
"""Flags for polling Command.
DETECT_AND_THROW(12):
Detect data loss holes and throw EOFError exception
DETECT(8):
Detect data loss holes
FILL(1):
Fill holes
DEFAULT(0):
No Flags
Can be combined with bitwise operations
>>> PollFlags.FILL | PollFlags.DETECT
<PollFlags.DETECT|FILL: 9>
"""
DETECT_AND_THROW = 12
DETECT = 8
FILL = 1
DEFAULT = 0
class Session(Node):
"""Session to a data server.
Zurich Instruments devices use a server-based connectivity methodology.
Server-based means that all communication between the user and the
instrument takes place via a computer program called a server, the data
    server. The data server recognizes available instruments and manages all
communication between the instrument and the host computer on one side, and
communication to all the connected clients on the other side. (For more
information on the architecture please refer to the user manual
http://docs.zhinst.com/labone_programming_manual/introduction.html)
    The entry point for any connection is therefore a client session to an
    existing data server. This class represents a single client session to a
data server. The session enables the user to connect to one or multiple
instruments (also creates the dedicated objects for each device), access
the LabOne modules and poll data. In short it is the only object the user
    needs to create themselves.
Info:
Except for the HF2 a single session can be used to connect to all
devices from Zurich Instruments. Since the HF2 is historically based on
another data server called the hf2 data server it is not possible to
connect HF2 devices a "normal" data server and also not possible to
connect devices apart from HF2 to the hf2 data server.
Args:
server_host: Host address of the data server (e.g. localhost)
server_port: Port number of the data server. If not specified the session
            uses the default port 8004 (8005 for an HF2 data server).
(default = None)
        hf2: Flag if the session should be established with an HF2 data server or
the "normal" one for all other devices. If not specified the session
will detect the type of the data server based on the port.
(default = None)
connection: Existing DAQ server object. If specified the session will
not create a new session to the data server but reuse the passed
one. (default = None)
"""
def __init__(
self,
server_host: str,
server_port: t.Optional[int] = None,
*,
hf2: t.Optional[bool] = None,
connection: t.Optional[core.ziDAQServer] = None,
):
self._is_hf2_server = bool(hf2)
if connection is not None:
self._is_hf2_server = "HF2" in connection.getString("/zi/about/dataserver")
if hf2 and not self._is_hf2_server:
raise ToolkitError(
"hf2 flag was set but the passed "
"DAQServer instance is not a HF2 data server."
)
if hf2 is False and self._is_hf2_server:
raise ToolkitError(
"hf2 flag was set but the passed "
"DAQServer instance is a HF2 data server."
)
self._daq_server = connection
else:
server_port = server_port if server_port else 8004
if self._is_hf2_server and server_port == 8004:
server_port = 8005
try:
self._daq_server = core.ziDAQServer(
server_host,
server_port,
1 if self._is_hf2_server else 6,
)
except RuntimeError as error:
if "Unsupported API level" not in error.args[0]:
raise
if hf2 is None:
self._is_hf2_server = True
self._daq_server = core.ziDAQServer(
server_host,
server_port,
1,
)
elif not hf2:
raise ToolkitError(
"hf2 Flag was reset but the specified "
f"server at {server_host}:{server_port} is a "
"HF2 data server."
) from error
self._devices = HF2Devices(self) if self._is_hf2_server else Devices(self)
self._modules = ModuleHandler(self)
hf2_node_doc = Path(__file__).parent / "resources/nodedoc_hf2_data_server.json"
nodetree = NodeTree(
self._daq_server,
prefix_hide="zi",
list_nodes=["/zi/*"],
preloaded_json=json.loads(hf2_node_doc.open("r").read())
if self._is_hf2_server
else None,
)
super().__init__(nodetree, tuple())
self._multi_transaction = Transaction(self.root)
def __repr__(self):
return str(
f"{'HF2' if self._is_hf2_server else ''}DataServerSession("
f"{self._daq_server.host}:{self._daq_server.port})"
)
@classmethod
def from_existing_connection(cls, connection: core.ziDAQServer) -> "Session":
"""Initialize Session from an existing connection.
Args:
connection: Existing connection.
.. versionadded:: 0.4.0
"""
is_hf2_server = "HF2" in connection.getString("/zi/about/dataserver")
return cls(
server_host=connection.host,
server_port=connection.port,
hf2=is_hf2_server,
connection=connection,
)
def connect_device(
self, serial: str, *, interface: t.Optional[str] = None
) -> tk_devices.DeviceType:
"""Establish a connection to a device.
Info:
It is allowed to call this function for an already connected device.
In that case the function simply returns the device object of the
device.
If the interface is not specified the interface will be auto detected.
Meaning one of the available interfaces will be selected, prioritizing
1GbE over USB.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the
instrument.
interface: Device interface (e.g. = "1GbE"). If not specified
                the default interface from the discovery is used.
Returns:
Device object
Raises:
KeyError: Device is not found.
RuntimeError: Connection failed.
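        Examples:
            >>> # sketch; "dev1234" is a placeholder serial
            >>> device = session.connect_device("dev1234")
            >>> device = session.connect_device("dev1234", interface="1GbE")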
"""
serial = serial.lower()
if serial not in self._devices:
if not interface:
if self._is_hf2_server:
interface = "USB"
else:
# Take interface from the discovery
dev_info = json.loads(self.daq_server.getString("/zi/devices"))[
serial.upper()
]
interface = dev_info["INTERFACE"]
if interface == "none":
interface = (
"1GbE"
if "1GbE" in dev_info["INTERFACES"]
else dev_info["INTERFACES"].split(",")[0]
)
self._daq_server.connectDevice(serial, interface)
if isinstance(self._devices, HF2Devices):
self._devices.add_hf2_device(serial)
return self._devices[serial]
def disconnect_device(self, serial: str) -> None:
"""Disconnect a device.
Warning:
            This function will return immediately. The disconnection of the
            device may not have finished yet.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
"""
self._devices.pop(serial, None)
self.daq_server.disconnectDevice(serial)
def sync(self) -> None:
"""Synchronize all connected devices.
Synchronization in this case means creating a defined state.
The following steps are performed:
* Ensures that all set commands have been flushed to the device
* Ensures that get and poll commands only return data which was
recorded after the sync command. (ALL poll buffers are cleared!)
* Blocks until all devices have cleared their busy flag.
Warning:
The sync is performed for all devices connected to the DAQ server
Warning:
This command is a blocking command that can take a substantial
amount of time.
Raises:
RuntimeError: ZIAPIServerException: Timeout during sync of device
"""
self.daq_server.sync()
def poll(
self,
recording_time: float = 0.1,
*,
timeout: float = 0.5,
flags: PollFlags = PollFlags.DEFAULT,
) -> t.Dict[Node, t.Dict[str, t.Any]]:
"""Polls all subscribed data from the data server.
Poll the value changes in all subscribed nodes since either subscribing
or the last poll (assuming no buffer overflow has occurred on the Data
Server).
Args:
recording_time: Defines the duration of the poll in seconds. (Note that not
only the newly recorded values are polled but all values since
either subscribing or the last poll). Needs to be larger than
zero. (default = 0.1)
timeout: Adds an additional timeout in seconds on top of
`recording_time`. Only relevant when communicating in a slow
network. In this case it may be set to a value larger than the
expected round-trip time in the network. (default = 0.5)
            flags: Flags for the polling (see :class:`PollFlags`)
Returns:
Polled data in a dictionary. The key is a `Node` object and the
value is a dictionary with the raw data from the device
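        Examples:
            >>> # sketch; assumes a demodulator sample node was subscribed first
            >>> device.demods[0].sample.subscribe()
            >>> result = session.poll(recording_time=0.1)
            >>> data = result[device.demods[0].sample]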
"""
return NodeDict(
self.daq_server.poll(
recording_time, int(timeout * 1000), flags=flags.value, flat=True
)
)
def raw_path_to_node(
        self, raw_path: str, *, module: t.Optional[tk_modules.ModuleType] = None
) -> Node:
"""Converts a raw node path string into a Node object.
The device that this strings belongs to must be connected to the Data
Server. Optionally a module can be specified to which the node belongs to.
(The module is only an additional search path, meaning even if a module
is specified the node can belong to a connected device.)
Args:
            raw_path: Raw node path (e.g. /dev1234/relative/path/to/node).
            module: Optional module the node may belong to.
Returns:
Corresponding toolkit node object.
Raises:
            ValueError: If the `raw_path` does not start with a leading slash.
ToolkitError: If the node does not belong to the optional module or
to a connected device.
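        Examples:
            >>> # sketch; "dev1234" is a placeholder serial of a connected device
            >>> node = session.raw_path_to_node("/dev1234/demods/0/freq")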
.. versionchanged:: 0.5.3
Changed `RuntimeError` to `ValueError`.
"""
if not raw_path.startswith("/"):
raise ValueError(
f"{raw_path} does not seem to be an absolute path. "
"It must start with a leading slash."
)
if module is not None:
node = module.root.raw_path_to_node(raw_path)
if node.raw_tree[0] in module.root:
return node
try:
serial = raw_path.split("/")[1]
if serial == "zi":
return self.root.raw_path_to_node(raw_path)
return self.devices[serial].root.raw_path_to_node(raw_path)
except KeyError as error:
raise ToolkitError(
f"Node belongs to a device({raw_path.split('/')[1]}) not connected to "
"the Data Server."
) from error
@contextmanager
def set_transaction(self) -> t.Generator[None, None, None]:
"""Context manager for a transactional set.
Can be used as a context in a with statement and bundles all node set
commands into a single transaction. This reduces the network overhead
and often increases the speed.
In comparison to the device level transaction manager this manager
affects all devices that are connected to the Session and bundles all
set commands into a single transaction.
        Within the with block all set commands to nodes will be buffered
        and bundled into a single command at the end automatically.
(All other operations, e.g. getting the value of a node, will not be
affected)
Warning:
The set is always performed as deep set if called on device nodes.
Examples:
>>> with session.set_transaction():
            ...     device1.test[0].a(1)
            ...     device2.test[0].a(2)
.. versionadded:: 0.4.0
"""
self._multi_transaction.start()
for device in self.devices.created_devices():
device.root.transaction.start(self._multi_transaction.add)
self.root.transaction.start(self._multi_transaction.add)
try:
yield
self._daq_server.set(self._multi_transaction.result())
finally:
for device in self.devices.created_devices():
device.root.transaction.stop()
self.root.transaction.stop()
self._multi_transaction.stop()
@property
def multi_transaction(self) -> Transaction:
"""Flag if a session wide transaction is in progress.
.. versionadded:: 0.4.0
"""
return self._multi_transaction
@property
def devices(self) -> Devices:
"""Mapping for the connected devices."""
return self._devices
@property
def modules(self) -> ModuleHandler:
"""Modules of LabOne."""
return self._modules
@property
def is_hf2_server(self) -> bool:
"""Flag if the data server is a HF2 Data Server."""
return self._is_hf2_server
@property
def daq_server(self) -> core.ziDAQServer:
"""Managed instance of the core.ziDAQServer."""
return self._daq_server
@property
def server_host(self) -> str:
"""Server host."""
return self._daq_server.host
@property
def server_port(self) -> int:
"""Server port."""
        return self._daq_server.port


# --- end of file: zhinst-toolkit / zhinst/toolkit/session.py ---
import json
import typing as t
import warnings
from collections.abc import MutableMapping
from enum import IntFlag
from io import BytesIO
import numpy as np
from elftools.elf.elffile import ELFFile
from elftools.common.exceptions import ELFError
from zhinst.utils import convert_awg_waveform, parse_awg_waveform
from zhinst.toolkit.exceptions import ValidationError
_Waveform = t.Tuple[np.ndarray, t.Optional[np.ndarray], t.Optional[np.ndarray]]
class OutputType(IntFlag):
"""Waveform output type.
OUT1: Enables the output 1 for the respective wave.
OUT2: Enables the output 2 for the respective wave.
.. versionadded:: 0.3.5
"""
OUT1 = 1
OUT2 = 2
class Wave(np.ndarray):
"""Numpy array subclass containing additional waveform metadata.
    This class takes a standard ndarray that already exists, casts it to the
    Wave type, and adds the following extra attributes/metadata:
* name
* output
The additional metadata is only used for the sequencer code generation.
(Based on https://numpy.org/doc/stable/user/basics.subclassing.html)
Args:
input_array: existing ndarray
name: optional name of the waveform in the sequencer code snippet.
output: optional output configuration for the waveform in the
sequencer code snippet.
.. versionadded:: 0.3.5
"""
def __new__(
cls,
input_array,
name: t.Optional[str] = None,
output: t.Optional[OutputType] = None,
) -> "Wave":
"""Casts an existing ndarray to a Wave type.
Args:
input_array: existing ndarray
name: optional name of the waveform in the sequencer code snippet.
output: optional output configuration for the waveform in the
sequencer code snippet.
Returns:
Array as Wave object.
"""
obj = np.asarray(input_array).view(cls)
obj.name = name
obj.output = output
return obj
def __array_finalize__(self, obj: t.Optional[np.ndarray]) -> None:
if obj is None:
return
self.name = getattr(obj, "name", None)
self.output = getattr(obj, "output", None)
class Waveforms(MutableMapping):
"""Waveform dictionary.
The key specifies the slot of the waveform on the device.
    The value is the waveform itself, represented by a tuple
(wave1, wave2, marker).
The value tuple(wave1, wave2=None, marker=None) consists of the following parts:
* wave1 (array): Array with data of waveform 1.
* wave2 (array): Array with data of waveform 2.
* markers (array): Array with marker data.
    A helper function called `assign_waveform` exists, which provides an easy
    way of assigning waveforms to slots. But one can also use direct
    dictionary access:
>>> wave = 1.0 * np.ones(1008)
>>> markers = np.zeros(1008)
>>> waveforms = Waveforms()
>>> waveforms.assign_waveform(0, wave)
>>> waveforms.assign_waveform(1, wave, -wave)
>>> waveforms.assign_waveform(2, wave, -wave, markers)
>>> waveforms.assign_waveform(3, wave, markers=markers)
>>> waveforms[4] = (wave,)
>>> waveforms[5] = (wave, -wave)
>>> waveforms[6] = (wave, -wave, markers)
>>> waveforms[7] = (wave, None, markers)
    The arrays can be provided as arrays of integers or floats. The first
    wave can also be of type complex. In that case the second waveform must
    be `None`.
Depending on the target format the function `get_raw_vector` converts the
waves into the following format:
* native AWG waveform format (interleaved waves and markers as uint16) that
can be uploaded to the AWG waveform nodes. In case the first wave is of
type complex the imaginary part is treated as the second wave.
* complex waveform format that can be uploaded to the generator waveform
nodes (does not support markers). In case two real waveforms have been
specified they are combined into a single complex waveform, where the
imaginary part defined by the second wave.
"""
def __init__(self):
self._waveforms = {}
def __getitem__(self, slot: int) -> _Waveform:
return self._waveforms[slot]
def __setitem__(self, slot: int, value: t.Union[np.ndarray, _Waveform]):
if isinstance(value, np.ndarray):
self._set_waveform(slot, (value, None, None))
else:
self._set_waveform(slot, value)
def __delitem__(self, slot: int):
del self._waveforms[slot]
def __iter__(self):
return iter(self._waveforms)
def __len__(self):
return len(self._waveforms)
def assign_waveform(
self,
slot: int,
wave1: np.ndarray,
wave2: t.Optional[np.ndarray] = None,
markers: t.Optional[np.ndarray] = None,
) -> None:
"""Assigns a waveform to a slot.
Args:
slot: slot number
wave1: Array with data of waveform 1.
wave2: Array with data of waveform 2. (default = None)
markers: Array with marker data. (default = None)
"""
self._set_waveform(slot, (wave1, wave2, markers))
def assign_native_awg_waveform(
self,
slot: int,
raw_waveform: np.ndarray,
channels: int = 1,
markers_present: bool = False,
) -> None:
"""Assigns a native AWG waveform to a slot.
Native AWG waveform = a single waveform (interleaved waves and markers
as uint16).
Args:
slot: slot number
raw_waveform: native AWG waveform.
channels: Number of channels present in the wave. (default = 1)
markers_present: Indicates if markers are interleaved in the wave.
(default = False)
"""
wave1, wave2, markers = parse_awg_waveform(
raw_waveform,
channels=channels,
markers_present=markers_present,
)
if markers_present and channels == 2:
self._waveforms[slot] = (wave1, wave2, markers)
elif channels == 2:
self._waveforms[slot] = (wave1, wave2, None)
elif markers_present:
self._waveforms[slot] = (wave1, None, markers)
else:
self._waveforms[slot] = (wave1, None, None)
def _set_waveform(
self,
slot: int,
value: _Waveform,
) -> None:
"""Assigns a tuple of waves to the slot.
The passed waves are validated against the following requirements:
* At least one wave must be defined
* At most three waves are defined
        * The waves must be numpy arrays
        * The waves must have the same length
        * If the first wave is complex the second wave must be None
Raises:
RuntimeError: If the tuple does not comply to the requirements.
"""
if len(value) < 1 or len(value) > 3:
raise RuntimeError(
"Only one(complex) or two(real) waveforms (plus an optional marker) "
f"can be specified per waveform. ({len(value)} where specified."
)
if (
not isinstance(value[0], np.ndarray)
or (
                len(value) > 1
and value[1] is not None
and not isinstance(value[1], np.ndarray)
)
or (
                len(value) > 2
                and value[2] is not None
and not isinstance(value[2], np.ndarray)
)
):
raise RuntimeError("Waveform must be specified as numpy.arrays")
if len(value) >= 2 and value[1] is not None and len(value[0]) != len(value[1]):
raise RuntimeError("The two waves must have the same length")
if len(value) == 3 and value[2] is not None and len(value[0]) != len(value[2]):
raise RuntimeError(
"The marker must have the same length than the waveforms"
)
        if np.iscomplexobj(value[0]) and not (len(value) < 2 or value[1] is None):
raise RuntimeError(
"The first waveform is complex therefore only one "
"waveform can be specified."
)
self._waveforms[slot] = tuple(
w.view(Wave) if w is not None else None for w in value
) + (None,) * (3 - len(value))
def get_raw_vector(
self,
slot: int,
*,
complex_output: bool = False,
) -> np.ndarray:
"""Get the raw vector for a slot required by the device.
Either converts a waveform into the native AWG waveform format that can
be uploaded to the AWG wave node or converts the waveform into a complex
waveform that can be uploaded to a generator wave node.
(complex_output = True).
Args:
slot: slot number of the waveform
complex_output: Flag if the output should be a complex waveform for a
                generator node, instead of the native AWG format that can
only be uploaded to an AWG node. (default = False)
Returns:
Waveform in the native AWG format or as a complex waveform
Raises:
ValueError: The length of the waves does not match the target length.
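        Examples:
            >>> # sketch: native AWG format for the AWG wave nodes
            >>> raw = waveforms.get_raw_vector(0)
            >>> # complex vector for the generator wave nodes
            >>> complex_wave = waveforms.get_raw_vector(0, complex_output=True)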
.. versionchanged:: 0.4.2
Removed `target_length` flag and functionality. The length check is
now done in the `validate` function.
"""
waves = self._waveforms[slot]
wave1 = np.zeros(1) if len(waves[0]) == 0 else waves[0]
wave2 = np.zeros(1) if waves[1] is not None and len(waves[1]) == 0 else waves[1]
marker = waves[2]
if complex_output and np.iscomplexobj(wave1):
if wave2 is not None or marker is not None:
warnings.warn("Complex values do not support markers", RuntimeWarning)
return wave1
if complex_output and not np.iscomplexobj(wave1):
if marker is not None:
warnings.warn("Complex values do not support markers", RuntimeWarning)
complex_wave = np.zeros(wave1.shape, dtype=np.complex128)
complex_wave.real = wave1
if wave2 is not None:
complex_wave.imag = wave2
return complex_wave
if np.iscomplexobj(wave1):
marker = wave2 if wave2 is not None else marker
wave2 = wave1.imag
wave1 = wave1.real
return convert_awg_waveform(
wave1,
wave2=wave2,
markers=marker if marker is not None else None,
)
def _get_waveform_sequence(self, index: int) -> str:
"""Get sequencer code snippet for a single waveform.
The sequencer code snippet is generated with the following information:
* Waveform length
* Waveform index
* presence of markers and for which channel
* Defined names of the waveforms (if set)
* Defined output configuration (if set)
Returns:
Sequencer code snippet.
.. versionadded:: 0.3.5
"""
waves = self._waveforms[index]
wave_length = max(1, waves[0].size)
w2_present = waves[1] is not None
marker = waves[2]
names = [waves[0].name, waves[1].name if waves[1] is not None else None]
outputs = [waves[0].output, waves[1].output if waves[1] is not None else None]
if np.iscomplexobj(waves[0]):
marker = waves[1] if waves[1] is not None else marker
w2_present = True
names = names if not names[0] or isinstance(names[0], str) else names[0]
outputs = (
outputs
if not outputs[0] or not isinstance(outputs, t.Iterable)
else outputs[0]
)
marker = None if marker is None else np.unpackbits(marker.astype(np.uint8))
def marker_to_bool(i: int) -> str:
return "true" if np.any(marker[7 - i :: 8]) else "false" # noqa: E203
def to_wave_str(i: int) -> str:
if marker is None:
return f"placeholder({wave_length}, false, false)"
return (
f"placeholder({wave_length}, {marker_to_bool(i*2)}, "
+ f"{marker_to_bool(i*2+1)})"
)
w1_assign = to_wave_str(0)
w2_assign = to_wave_str(1) if w2_present else ""
w2_decl = w1_decl = ""
if names[0]:
w1_decl = f"wave {names[0]} = {w1_assign};\n"
w1_assign = names[0]
if names[1]:
w2_decl = f"wave {names[1]} = {w2_assign};\n"
w2_assign = names[1]
if outputs[0]:
if outputs[0] in [OutputType.OUT1, OutputType.OUT2]:
w1_assign = f"{outputs[0]}, {w1_assign}"
elif outputs[0] == OutputType.OUT1 | OutputType.OUT2:
w1_assign = f"1, 2, {w1_assign}"
if outputs[1]:
if outputs[1] in [OutputType.OUT1, OutputType.OUT2]:
w2_assign = f"{outputs[1]}, {w2_assign}"
elif outputs[1] == OutputType.OUT1 | OutputType.OUT2:
w2_assign = f"1, 2, {w2_assign}"
if w2_assign:
return (
f"{w1_decl}{w2_decl}assignWaveIndex({w1_assign}, {w2_assign}, {index});"
)
return f"{w1_decl}assignWaveIndex({w1_assign}, {index});"
def get_sequence_snippet(self) -> str:
"""Return a sequencer code snippet for the defined waveforms.
Based on the defined waveforms and their additional information this
function generates a sequencer code snippet that can be used to define
the given waveforms. The following information will be used:
* Waveform length
* Waveform index
* presence of markers and for which channel
* Defined names of the waveforms (if set)
* Defined output configuration (if set)
Example:
>>> waveform = Waveform()
>>> waveform.assign_waveform(
0,
wave1=Wave(
np.ones(1008),
name="w1",
output=OutputType.OUT1 | OutputType.OUT2
),
wave2=Wave(
-np.ones(1008),
name="w2",
output=OutputType.OUT2),
markers=15 * np.ones(1008),
)
>>> waveform.get_sequence_snippet()
wave w1 = placeholder(1008, true, true);
wave w2 = placeholder(1008, true, true);
assignWaveIndex(1, 2, w1, 2, w2, 0);
Returns:
Sequencer Code snippet.
.. versionadded:: 0.3.5
"""
return "\n".join(
[
self._get_waveform_sequence(slot)
for slot in sorted(self._waveforms.keys())
]
)
def validate(self, meta_info: t.Union[bytes, str], *, allow_missing=True) -> None:
"""Validates the waveforms against the ones defined in a sequencer program.
        The information about the sequencer code can either be passed as a
        compiled elf file or as the waveform descriptor provided by the
        device once a valid sequencer code was uploaded to the device.
        The waveform descriptor can be read from the device through the node
        `<path to awg core>.waveform.descriptors`
        (e.g. `hdawg.awgs[0].waveform.descriptors()`).
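        Example:
            A minimal sketch, assuming ``device`` is a connected device on
            which a matching sequencer program has already been uploaded and
            ``waveforms`` is this object:
            >>> descriptor = device.awgs[0].waveform.descriptors()
            >>> waveforms.validate(descriptor)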
Args:
meta_info: Compiled sequencer code or the waveform descriptor.
allow_missing: Flag if this function allows placeholder waveforms
to be defined in the sequencer code that are not used in this
object. This is disabled by default since uploading/replacing
only a fraction of the defined waveforms is a valid use case.
Raises:
TypeError: If the meta_info are not a compiled elf file, string or
dictionary.
ValidationError: If the Validation fails.
.. versionadded:: 0.4.2
"""
waveform_info = {}
try:
elf_info = ELFFile(BytesIO(meta_info)) # type: ignore[arg-type]
raw_data = elf_info.get_section_by_name(".waveforms").data().decode("utf-8")
waveform_info = json.loads(raw_data)["waveforms"]
except (TypeError, ELFError) as e:
if isinstance(meta_info, str):
waveform_info = json.loads(meta_info)["waveforms"]
elif isinstance(meta_info, dict):
waveform_info = (
meta_info["waveforms"] if "waveforms" in meta_info else meta_info
)
else:
raise TypeError(
"meta_info needs to be an elf file or the waveform descriptor from "
"the device (e.g. device.awgs[0].waveform.descriptor(). The passed "
f"meta_info are of type {type(meta_info)} ({str(meta_info)})."
) from e
defined_wave_lengths = {
index: wave["length"]
for index, wave in enumerate(waveform_info)
if wave["name"].startswith("__placeholder")
or wave["name"].startswith("__playWave")
}
for index, waves in self._waveforms.items():
if index >= len(waveform_info):
raise IndexError(
f"There are {len(waveform_info)} waveforms defined on the device "
f"but the passed waveforms specified one with index {index}."
)
try:
target_length = int(defined_wave_lengths[index])
except KeyError as e:
if "__filler" in waveform_info[index]["name"]:
raise ValidationError(
f"The waveform at index {index} is only "
"a filler and can not be overwritten."
) from e
raise ValidationError(
f"The waveform at index {index} is not a placeholder but of "
f"type {waveform_info[index]['name'].lstrip('__')[:-4]}"
) from e
wave_length = max(len(waves[0]), 1)
if wave_length != target_length:
                # Waveforms can only be too short since the compiler always
                # rounds the length up to the next valid value.
raise ValidationError(
f"Waveforms at index {index} are smaller than the target length "
f"{wave_length} < {target_length}."
)
if not allow_missing and len(defined_wave_lengths) > len(self._waveforms):
missing_indexes = [
i for i in defined_wave_lengths.keys() if i not in self._waveforms
]
raise ValidationError(
"The the sequencer code defines placeholder waveforms for the "
f"following indexes that are missing in this object: {missing_indexes}"
) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/waveform.py | waveform.py |
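

# Illustrative sketch (not part of the library): how the two output formats of
# ``Waveforms.get_raw_vector`` relate. The waveform length of 16 samples is an
# arbitrary assumption.
def _example_get_raw_vector():  # pragma: no cover - illustrative only
    import numpy as np

    waveforms = Waveforms()
    waveforms[0] = (0.5 * np.ones(16), -0.5 * np.ones(16))
    # Native AWG format: both channels interleaved into a single int16 vector.
    native = waveforms.get_raw_vector(0)
    # Complex format: wave1 becomes the real part, wave2 the imaginary part.
    complex_wave = waveforms.get_raw_vector(0, complex_output=True)
    assert np.allclose(complex_wave.real, 0.5)
    assert np.allclose(complex_wave.imag, -0.5)
    return native, complex_wave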
from zhinst.toolkit.waveform import Waveforms
import re
import typing as t
class Sequence:
r"""A representation of a ZI sequencer code.
This class enables a compact representation of a sequence for a Zurich
Instruments device. Although a sequencer code can be represented by a
simple string this class offers the following advantages:
    * Define a constants dictionary. The constants will be added
      automatically to the top of the resulting sequencer code and help
      to prevent the use of f-strings (which require the escaping of {}).
* Link Waveforms to the sequence. This adds the waveform placeholder
definitions to the top of the resulting sequencer code.
Note:
This class is only for convenience. The same functionality can be
achieved with a simple string.
Args:
code: Sequencer code (default = None).
constants: A dictionary of constants to be added to the top of the
resulting sequencer code. (default = None).
waveforms: Waveforms that will be used in the sequence.
Example:
>>> waveforms = Waveforms()
>>> waveforms[0] = (0.5*np.ones(1008), -0.2*np.ones(1008), np.ones(1008))
>>> sequencer = Sequence()
>>> sequencer.constants["PULSE_WIDTH"] = 10e-9 #ns
>>> sequencer.waveforms = waveforms
>>> sequencer.code = \"\"\"\
// Hello World
repeat(5)
...
\"\"\"
>>> str(sequencer)
// Constants
const PULSE_WIDTH = 10e-9;
// Waveforms declaration
assignWaveIndex(placeholder(1008, true, false), placeholder(1008, \
false, false), 0);
assignWaveIndex(placeholder(1008, false, false), placeholder(1008, \
false, false), 2);
// Hello World
repeat(5)
...
"""
    def __init__(
        self,
        code: t.Optional[str] = None,
        *,
        constants: t.Optional[t.Dict[str, float]] = None,
        waveforms: t.Optional[Waveforms] = None,
    ):
self._partial_seq = code if code else ""
self._constants = constants if constants else {}
self._waveforms = waveforms
def __str__(self) -> str:
return self.to_string()
def to_string(self, *, waveform_snippet: bool = True) -> str:
"""Convert the object into a string.
Args:
waveform_snippet: Flag if the waveform declaration should be added
to the top of the resulting sequence. (default = True).
Returns:
String representation of the sequence.
"""
sequence = self._partial_seq
if waveform_snippet and self._waveforms:
sequence = (
"// Waveforms declaration\n"
+ self._waveforms.get_sequence_snippet()
+ "\n"
+ sequence
)
new_constants = {}
for key, value in self._constants.items():
constant_regex = re.compile(rf"(const {key} *= *)(.*);")
if constant_regex.search(sequence):
sequence = constant_regex.sub(rf"\g<1>{value};", sequence)
else:
new_constants[key] = value
if len(new_constants) > 0:
sequence = (
"// Constants\n"
+ "\n".join(
[f"const {key} = {value};" for key, value in new_constants.items()]
)
+ "\n"
+ sequence
)
return sequence
@property
def code(self):
"""Code of the Sequence."""
return self._partial_seq
@code.setter
def code(self, value):
"""Code of the Sequence."""
self._partial_seq = value
@property
def constants(self):
"""Constants of the Sequence."""
return self._constants
@constants.setter
def constants(self, value):
"""Constants of the Sequence."""
self._constants = value
@property
def waveforms(self):
"""Waveforms of the Sequence."""
return self._waveforms
@waveforms.setter
def waveforms(self, value):
"""Waveforms of the Sequence."""
self._waveforms = value | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/sequence.py | sequence.py |
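

# Illustrative sketch (not part of the library): constants that already appear
# in the code are overwritten in place, while unknown constants are prepended
# under a "// Constants" section. The sequencer code below is an arbitrary
# assumption.
def _example_sequence_constants():  # pragma: no cover - illustrative only
    seq = Sequence(
        "const AMP = 0.5;\nplayWave(AMP * w0);",
        constants={"AMP": 0.25, "CYCLES": 100},
    )
    rendered = seq.to_string()
    assert "const AMP = 0.25;" in rendered  # replaced in place
    assert "const CYCLES = 100;" in rendered  # prepended under // Constants
    return rendered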
import copy
import json
import typing as t
import jsonref
import jsonschema
from zhinst.toolkit.exceptions import ValidationError
# JSON Schema validator for validating the schemes.
JSON_SCHEMA_VALIDATOR = jsonschema.Draft4Validator
def _validate_instance(instance: object, schema: dict, validator=JSON_SCHEMA_VALIDATOR):
"""Validate JSON instance.
Args:
instance: Instance to be validated.
        schema: Schema to validate against.
        validator: Validator class used for the validation.
Raises:
ValidationError: Validation failed.
"""
try:
jsonschema.validate(
instance=instance,
schema=schema,
cls=validator,
)
except jsonschema.ValidationError as e:
raise ValidationError(str(e)) from None
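

# Illustrative sketch (not part of the library): the wrapper translates
# ``jsonschema`` errors into the toolkit's own ``ValidationError``. The schema
# below is an arbitrary assumption.
def _example_validate_instance():  # pragma: no cover - illustrative only
    schema = {"type": "integer", "minimum": 0}
    _validate_instance(3, schema)  # passes silently
    try:
        _validate_instance(-1, schema)
    except ValidationError:
        pass  # raised because -1 violates the schema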
class ParentNode:
"""ParentNode of the command table.
ParentNode can contain one or multiple arguments and child ParentNodes.
It offers a dictionary-like object to manipulate command table properties.
Similar to the device nodes, it supports accessing the properties by attribute.
Args:
schema: JSON schema of the node.
path: Path representation of the node.
active_validation: Enable active validation.
"""
def __init__(
self, schema: dict, path: t.Tuple[str, ...], active_validation: bool = True
):
self._schema = copy.deepcopy(schema)
self._path = path
self._childs: t.Dict[t.Union[str, int], t.Any] = {}
self._active_validation = active_validation
def __repr__(self) -> str:
return "/" + "/".join(self._path)
def _validate_instance(self, instance: object, schema: dict):
"""Validate JSON instance.
Args:
instance: Instance to be validated.
schema: Schema
Raises:
ValidationError: Validation failed.
"""
if self._active_validation:
_validate_instance(instance, schema)
def is_empty(self) -> bool:
"""Check if the Node is empty and has no properties.
Returns:
            bool: True if the node has no child properties.
"""
return not bool(self._childs)
def _change_active_validation(obj: ParentNode, value: bool):
"""Change object active validation state.
Args:
obj: Object
value: State of validation
"""
obj._active_validation = value
class ParentEntry(ParentNode):
"""Parent entry of the CommandTable.
The parent can have both properties and child properties.
Args:
schema: JSON schema of the node.
path: Path representation of the node.
active_validation: Enable active validation.
"""
def __init__(
self, schema: dict, path: t.Tuple[str, ...], active_validation: bool = True
):
super().__init__(schema, path, active_validation)
self._attributes = {}
self._child_props = {}
self._properties: t.Dict[str, t.Any] = {}
# ParentEntry does not have json list entries at this moment.
for name, property_ in schema["properties"].items():
if "properties" in property_:
self._child_props[name] = property_
else:
self._attributes[name] = property_
def __contains__(self, k):
return k in self._child_props or self._attributes.get(k, None) is not None
def __dir__(self):
dir_info = set()
for k in super().__dir__():
if not k.startswith("_"):
dir_info.add(k)
return dir_info.union(set(list(self._schema["properties"].keys())))
def __getattr__(self, name: str) -> t.Union["ParentEntry", t.Any]:
if name.startswith("_"):
return None
try:
return self._childs[name]
except KeyError:
if name in self._child_props:
self._childs[name] = ParentEntry(
self._child_props[name],
self._path + (name,),
self._active_validation,
)
return self._childs[name]
if name in self._attributes:
return self._properties.get(name, None)
raise AttributeError(
f"{name}. Available entries: {self._available_attributes()}"
)
def __setattr__(self, name: str, value: t.Any):
if name.startswith("_"):
super().__setattr__(name, value)
elif self._attributes and name in self._attributes:
if value is None:
self._childs.pop(name, None)
self._properties.pop(name, None)
else:
self._validate_instance(value, self._attributes[name])
self._childs[name] = value
self._properties[name] = value
elif value is None and name in self._childs:
self._childs.pop(name)
else:
raise AttributeError(
f"{name}. Available entries: {self._available_attributes()}"
)
def _available_attributes(self) -> t.List[str]:
"""Available property attributes for the instance."""
return list(
self._attributes.keys() if self._attributes else self._child_props.keys()
)
def as_dict(self) -> dict:
"""Return a dictionary presentation of the table node.
Returns:
dict: Table node as dictionary.
"""
result = {}
for name, child in self._childs.items():
if isinstance(child, (ParentEntry, HeaderEntry)):
if not child.is_empty():
result[name] = child.as_dict()
else:
result[name] = child
return result
def info(self, value: t.Optional[str] = None) -> dict:
"""Get info about the property.
Args:
            value: Property to get the info about. Otherwise
                info about the whole node is returned.
Returns:
Info about the property.
"""
return self._schema["properties"].get(value, None) if value else self._schema
def clear(self) -> None:
"""Clear all properties from the object."""
self._childs = {}
self._properties = {}
class ListEntry(ParentNode):
"""List entry of a command table.
Args:
schema: JSON schema of the node.
path: Path representation of the node.
active_validation: Enable active validation.
"""
def __init__(
self, schema: dict, path: t.Tuple[str, ...], active_validation: bool = True
):
super().__init__(schema, path, active_validation)
self._min_length = schema["minItems"]
self._max_length = schema["maxItems"]
self._index_schema = schema["items"]["properties"]["index"]
self._schema["items"]["properties"].pop("index", None)
def __len__(self):
return len(self._childs)
def __getitem__(self, number: int) -> ParentEntry:
self._validate_instance(number, self._index_schema)
try:
return self._childs[number]
except KeyError:
self._childs[number] = ParentEntry(
self._schema["items"],
self._path + (str(number),),
self._active_validation,
)
return self._childs[number]
def __delitem__(self, key):
del self._childs[key]
@property
def range(self) -> t.Tuple[int, int]:
"""Get the range for number of minimum and maximum items in the table.
Returns:
Range for number of items in the table.
"""
return (self._min_length, self._max_length)
def as_list(self) -> t.List[dict]:
"""Return a list representation of the table.
Returns:
List of dictionary representation of entries in the table.
"""
table = []
for name, child in self._childs.items():
if isinstance(child, ParentEntry):
json_ = child.as_dict()
if json_:
item = {"index": name}
item.update(json_)
table.append(item)
return table
class HeaderEntry(ParentEntry):
"""Header entry of a command table.
Args:
schema: JSON schema of the node.
path: Path representation of the node.
version: JSON schema version
active_validation: Enable active validation.
"""
def __init__(
self,
schema: dict,
path: tuple,
version: t.Optional[str] = None,
active_validation: bool = True,
):
super().__init__(schema, path, active_validation)
# L1 22.08 new schema format
if version:
self._childs["version"] = version
else:
self._childs["version"] = schema["properties"]["version"]["enum"][0]
@property
def version(self) -> str:
"""Version of the schema."""
return self._childs["version"]
def _derefence_json(schema: t.Union[str, dict]) -> t.Any:
"""Dereference JSON schema.
Args:
schema: JSON schema as a string or dictionary.
Returns:
Dereferenced schema.
Raises:
ValueError: Wrong `schema` type.
"""
if isinstance(schema, str):
return jsonref.loads(schema, jsonschema=True)
if isinstance(schema, dict):
return jsonref.loads(json.dumps(schema), jsonschema=True)
raise ValueError(schema)
class CommandTable:
"""Representation of a ZI device command table.
The class provides functionality to create and modify existing command tables.
    The CommandTable can be modified via the ``header`` and ``table`` properties.
Args:
json_schema: JSON Schema of the command table.
active_validation: Active validation of table entries. (default = True)
Active validation enabled:
Each time a table entry is accessed, the values are validated
against the given JSON schema. It is suggested to keep disabled in
production code as it will slow the command table creation.
Active validation disabled:
No validation happens during command table entry modifications, thus
making the creation of the command table faster.
.. versionadded:: 0.5.0
The ``active_validation`` parameter was added.
Example:
.. code-block:: python
>>> from zhinst.toolkit import CommandTable
>>> ct = CommandTable(json_schema)
    The ``header`` and ``table`` properties can then be used:
.. code-block:: python
>>> ct.header.version
"1.1"
>>> ct.header.userString = "My table"
>>> ct.table[0].amplitude.value = 1
>>> ct.table[0].amplitude
1
>>> ct.as_dict()
Active validation
        With active validation, an error is raised immediately on an incorrect value:
.. code-block:: python
>>> ct = CommandTable(json_schema, active_validation=True)
>>> ct.table[0].amplitude0.value = 999e9
ValidationError
Disabling active validation:
        No ``ValidationError`` is raised during the creation of the command table,
        but once it is uploaded or ``as_dict()`` is called, the validation happens.
.. code-block:: python
>>> ct = CommandTable(json_schema, active_validation=False)
>>> ct.table[0].amplitude0.value = 999e9 # No errors raised
>>> ct.as_dict()
ValidationError
Disabling active validation improves the speed of large command tables:
.. code-block:: python
>>> for i in range(1024):
>>> ct.table[i].waveform.index = 1
>>> ct.table[i].amplitude0.value = 1
>>> ct.table[i].amplitude1.value = -0.0
>>> ct.table[i].amplitude0.increment = False
>>> ct.table[i].amplitude0.increment = True
"""
def __init__(self, json_schema: t.Union[str, dict], active_validation: bool = True):
self._ct_schema: t.Dict = _derefence_json(json_schema)
self._active_validation = active_validation
self._header: HeaderEntry = self._header_entry()
self._table: ListEntry = self._table_entry()
@property
def active_validation(self) -> bool:
"""State of active validation.
Returns:
True if active validation is enabled.
.. versionadded:: 0.5.0
"""
return self._active_validation
@active_validation.setter
def active_validation(self, value: bool):
"""Active validation.
Args:
value: The state of active validation.
.. versionadded:: 0.5.0
"""
self._active_validation = value
_change_active_validation(self._table, value)
_change_active_validation(self._header, value)
@property
def header(self) -> HeaderEntry:
"""Header of the built command table."""
return self._header
@property
def table(self) -> ListEntry:
"""Table entry of the built command table."""
return self._table
def _header_entry(self) -> HeaderEntry:
return HeaderEntry(
self._ct_schema["definitions"]["header"],
("header",),
self._ct_schema.get("version", ""),
self._active_validation,
)
def _table_entry(self) -> ListEntry:
return ListEntry(
self._ct_schema["definitions"]["table"], ("table",), self._active_validation
)
def clear(self) -> None:
"""Clear CommandTable back to its initial state."""
self._header = self._header_entry()
self._table = self._table_entry()
def as_dict(self) -> dict:
"""Return a dictionary representation of the :class:`CommandTable`.
        The function formats the returned value into a structure that is
        accepted by ZI devices which support command tables.
The table is validated against the given schema.
Returns:
CommandTable as a Python dictionary.
Raises:
            :class:`~zhinst.toolkit.exceptions.ValidationError`: The command table
                does not correspond to the given JSON schema.
.. versionchanged:: 0.4.2
Removed `$schema` key from resulting dictionary.
"""
result = {
"header": self._header.as_dict(),
"table": self._table.as_list(),
}
_validate_instance(result, self._ct_schema)
return result
def update(self, command_table: t.Union[str, dict]) -> None:
"""Update the existing instance of ``CommandTable`` with command table JSON.
If both command tables have the same properties, the existing ones
are overwritten by the new command table.
Args:
command_table: Existing command table JSON.
"""
def json_to_dict(json_: t.Union[str, t.Dict]) -> t.Dict:
if isinstance(json_, str):
json_ = json.loads(json_)
return json_ # type: ignore[return-value]
command_table = json_to_dict(copy.deepcopy(command_table))
_validate_instance(command_table, self._ct_schema)
def build_nodes(path: t.Optional[ParentEntry], index: int, obj: dict):
for k, v in obj.items():
if isinstance(v, dict):
build_nodes(getattr(path, k), index, v)
else:
setattr(path, k, v)
def build_header_nodes(header: HeaderEntry, obj: dict):
for k, v in obj.items():
setattr(header, k, v)
build_header_nodes(self._header, command_table["header"])
for item in command_table["table"]:
index = item.pop("index", None)
build_nodes(self._table[index], index, item) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/command_table.py | command_table.py |
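

# Illustrative sketch (not part of the library): round-tripping a command table
# through JSON with ``update``. ``schema`` is assumed to be the JSON schema of
# the target device, and the ``waveform.index`` property is assumed to exist in
# that schema.
def _example_command_table_roundtrip(schema: dict):  # pragma: no cover
    ct = CommandTable(schema)
    ct.table[0].waveform.index = 0
    exported = json.dumps(ct.as_dict())
    restored = CommandTable(schema)
    restored.update(exported)
    assert restored.as_dict() == ct.as_dict()
    return restored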
import fnmatch
import json
import numbers
import re
import time
import typing as t
from collections import namedtuple
from collections.abc import Sequence
from enum import IntEnum
import numpy as np
from zhinst.toolkit.nodetree.helper import (
NodeDict,
lazy_property,
resolve_wildcards_labone,
)
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.nodetree import NodeTree
class NodeEnumMeta:
"""Custom Metaclass for NodeEnum.
Note: Required to enable pickling of a NodeEnum value.
    It simply serves the purpose of recreating a NodeEnum for a given enum
    value. Since the NodeEnums are created dynamically, there is no way to
    recreate a NodeEnum value directly because Python cannot find the
    definition. This class bypasses the problem by providing the
    functionality to recreate the Enum on the fly.
    Warning: Although the class of the resulting enum object looks and feels
    the same as the original one, it is not. Therefore comparing the `type` will
fail. This is however the only limitation.
(type(value_old) != type(value_new) but value_old == value_new)
Args:
value: Value of the NodeEnum object that should be created.
class_name: Name of the NodeEnum class.
names: Mapping of the enum names to their corresponding integer value.
module: Should be set to the module this class is being created in.
"""
def __new__( # noqa: D102
cls, value: int, class_name: str, names: t.Dict[str, int], module: str
):
new_enum = NodeEnum(class_name, names, module=module)
return new_enum(value)
class NodeEnum(IntEnum):
"""Custom dynamically picklable IntEnum class.
    The Enum values for a device are created dynamically in toolkit based on
    the node information. Since they are not predefined but rather created
    dynamically, they are not picklable. This custom child class of IntEnum
overwrites the reduce function that returns all information required to
recreate the Enum class in `NodeEnumMeta`.
For more information on the reduce functionality and how it is used within
the pickle package see
[pep307](https://peps.python.org/pep-0307/#extended-reduce-api).
"""
# Required for typing
def __init__(self, *args, **kwargs):
...
# Required for typing
def __call__(self, *args, **kwargs): # noqa: D102 # pragma: no cover
...
def __reduce_ex__(self, _):
return NodeEnumMeta, (
self._value_,
self.__class__.__name__,
{key: int(value) for key, value in self.__class__._member_map_.items()},
self.__class__.__module__,
)
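

# Illustrative sketch (not part of the library): the pickle round trip enabled
# by ``__reduce_ex__`` and ``NodeEnumMeta``. The enum members are arbitrary
# assumptions.
def _example_pickle_node_enum():  # pragma: no cover - illustrative only
    import pickle

    options = NodeEnum("options", {"off": 0, "on": 1}, module=__name__)
    value = options(1)
    restored = pickle.loads(pickle.dumps(value))
    assert restored == value  # equal by value ...
    assert type(restored) is not type(value)  # ... but not the same class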
class NodeInfo:
"""Class that holds the additional information for a single node.
LabOne provides for each leaf node a dictionary of so called node info
(In addition toolkit or the user may also add some information for selected
nodes). This class wraps around these information and exposes them to a user
in a friendly way.
    During the initialization it fetches the relevant information from the
    root of the nodetree and stores it internally.
Args:
        node: A node the information belongs to.
.. versionchanged:: 0.5.0
Add support for signals in sample nodes. The daq module of LabOne
supports subscribing to signals of samples nodes directly. They can be
specified by appending them with a dot to the node path
        (e.g. /dev1234/demods/0/sample.x). The change now supports these signals
natively in the nodetree.
"""
def __init__(self, node: "Node"):
self._info = {}
self._is_wildcard = False
self._is_partial = False
if any(wildcard in "".join(node.raw_tree) for wildcard in ["*", "?", "["]):
self._info = {"Node": node.root.node_to_raw_path(node)}
self._is_wildcard = True
else:
try:
self._info = next(iter(node.root.get_node_info_raw(node).values()))
self._info["Node"] = self._info.get(
"Node", node.root.node_to_raw_path(node)
).lower()
except KeyError as error:
self._info = {"Node": error.args[0]}
if "sample" in node.raw_tree and "sample" != node.raw_tree[-1]:
path, signal = self._info["Node"].split("/sample/")
self._info["Node"] = path + "/sample." + ".".join(signal.split("/"))
if self._check_partial(node):
self._is_partial = True
if self._check_dynamic(node):
self._info.update(
json.loads(
node.root.connection.listNodesJSON(error.args[0])
).get(error.args[0], {})
)
def __dir__(self):
dir_info = []
for var, value in vars(self.__class__).items():
if isinstance(value, property) and not var.startswith("_"):
dir_info.append(var)
return dir_info
def __repr__(self) -> str:
node_type = "partial" if self.is_partial else "leaf"
node_type = "wildcard" if self.contains_wildcards else node_type
return f'NodeInfo("{self.path}",{node_type}-node)'
def __str__(self) -> str:
string = self.path
if self.is_partial:
return string + "\nPartial node"
if self._is_wildcard:
return string + "\nContains wildcards"
if "Description" in self._info:
string += "\n" + self._info["Description"]
for key, value in self._info.items():
if key == "Options":
string += f"\n{key}:"
for option, description in value.items():
string += f"\n {option}: {description}"
elif key not in ["Description", "Node", "SetParser", "GetParser"]:
string += f"\n{key}: {value}"
return string
def __getitem__(self, item: str) -> t.Union[str, t.Dict[str, str]]:
return self._info[item]
def __contains__(self, k):
return k in self._info
def __hash__(self):
return hash(self.path + "NodeInfo")
def __eq__(self, other):
return self._info == other._info
T = t.TypeVar("T")
def set_parser(self, value: T) -> T:
"""Parse the set value."""
try:
_parser = self._info["SetParser"]
if isinstance(_parser, list):
for parser in _parser:
value = parser(value)
return value
return _parser(value)
except KeyError:
return value
def get_parser(self, value: T) -> T:
"""Parse the get value."""
try:
_parser = self._info["GetParser"]
if isinstance(_parser, list):
for parser in _parser:
value = parser(value)
return value
return _parser(value)
except KeyError:
return value
@staticmethod
def _check_partial(node: "Node") -> bool:
"""Flag if the node is a partial node."""
for child_node, _ in node.root:
if node.is_child_node(child_node) and node != child_node:
return True
return False
@staticmethod
def _check_dynamic(node: "Node") -> bool:
try:
return node.raw_tree[-2] == "waves"
except IndexError:
return False
@property
def is_partial(self) -> bool:
"""Flag if the node is a partial node (non-leaf node)."""
return self._is_partial
@property
def contains_wildcards(self) -> bool:
"""Flag if the node contains wildcards."""
return self._is_wildcard
@property
def readable(self) -> t.Optional[bool]:
"""Flag if the node is readable.
Returns None if the node does not provide the information (e.g. wildcard
or partial nodes.)
"""
try:
return "Read" in self._info["Properties"]
except KeyError:
return None
@property
def writable(self) -> t.Optional[bool]:
"""Flag if the node is writable.
Returns None if the node does not provide the information (e.g. wildcard
or partial nodes.)
"""
try:
return "Write" in self._info["Properties"]
except KeyError:
return None
@property
def is_setting(self) -> t.Optional[bool]:
"""Flag if the node is setting node.
Returns None if the node does not provide the information (e.g. wildcard
or partial nodes.)
"""
try:
return "Setting" in self._info["Properties"]
except KeyError:
return None
@property
def is_vector(self) -> t.Optional[bool]:
"""Flag if the value of the node a vector.
Returns None if the node does not provide the information (e.g. wildcard
or partial nodes.)
"""
try:
return "Vector" in self._info["Type"]
except KeyError:
return None
@property
def path(self) -> str:
"""Path (LabOne representation) of the node."""
return self._info["Node"]
@property
def description(self) -> str:
"""Description of the node."""
return self._info["Description"]
@property
def type(self) -> str:
"""Type of the node."""
return self._info["Type"]
@property
def unit(self) -> str:
"""Unit of the node."""
return self._info["Unit"]
_option_info = namedtuple("_option_info", ["enum", "description"])
@lazy_property
def options(self) -> t.Dict[int, _option_info]:
"""Options of the node."""
option_map = {}
for key, value in self._info.get("Options", {}).items():
# Find all the keywords. We use only the first one
# since it should be unambiguous
enum_re = re.findall(r'"([a-zA-Z0-9-_"]+)"', value)
enum = enum_re[0] if enum_re else ""
# The description is either what comes after
# the colon and space, or the whole string.
# This is the case for nameless options, when the
# key is an integer (for example demods/x/order)
desc = re.findall(r'(?:.+":\s)?(.+)$', value)[0]
option_map[int(key)] = self._option_info(enum, desc)
return option_map
@lazy_property
def enum(self) -> t.Optional[NodeEnum]:
"""Enum of the node options."""
options_reversed = {}
for int_key, value in self._info.get("Options", {}).items():
# Find all the keywords associated to a integer key
enum_re = re.finditer(r'"(?P<keyword>[a-zA-Z0-9-_"]+)"', value)
for m in enum_re:
keyword = m.group("keyword")
options_reversed[keyword] = int_key
return (
NodeEnum(self.path, options_reversed, module=__name__)
if options_reversed
            # Nameless options do not have an enum.
else None
)
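

# Illustrative sketch (not part of the library): the option strings parsed by
# the two regexes above. The example string mimics the LabOne node info format
# and is an assumption, not taken from a real device.
def _example_option_parsing():  # pragma: no cover - illustrative only
    value = '"sigin0", "signal_input0": Signal Input 1.'
    keywords = re.findall(r'"([a-zA-Z0-9-_"]+)"', value)
    description = re.findall(r'(?:.+":\s)?(.+)$', value)[0]
    assert keywords == ["sigin0", "signal_input0"]
    assert description == "Signal Input 1."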
class Node:
"""Lazy node of a ``Nodetree``.
    The node is implemented in a lazy way. Meaning unless an operation is
    performed on the node, no check of whether the node is valid or not is
    performed.
    The child nodes of each node can be accessed in the same way as on the
Nodetree, either by attribute or by item.
The core functionality of each node is the overloaded call operator.
Making a call gets the value(s) for that node. Passing a value to the call
operator will set that value to the node on the device. Calling a node that
is not a leaf (wildcard or partial node) will return/set the value on every
node that matches it (the return type will be a dictionary).
Warning:
Setting a value to a non-leaf node will try to set the value of all
        nodes that match that node. It should therefore be used with great care
to avoid unintentional changes.
>>> nodetree.demods[0].freq()
1000
>>> nodetree.demods[0].freq(2000)
>>> nodetree.demods[0].freq()
2000
>>> nodetree.demods["*"].freq(3000)
>>> nodetree.demods["*"].freq()
{
'/dev1234/demods/0/freq': 3000
'/dev1234/demods/1/freq': 3000
'/dev1234/demods/2/freq': 3000
'/dev1234/demods/3/freq': 3000
}
.. versionchanged:: 0.3.5
Call operator returns `WildcardResult` when wildcards are used in
getting values.
.. versionchanged:: 0.5.0 Returns NodeDict instead of WildcardResult
The call operator supports the following flags:
* deep: Flag if the set operation should be blocking until the data
has arrived at the device, respectively if the get operation should
return the value from the device or the cached value on the data
server (if there is any). If this flag is set the operation can
take significantly longer. (default = False)
For a deep get operation the timestamp from the device is returned
in addition to the value (The timestamp can be None, e.g. deep gets
on LabOne modules).
>>> nodetree.demods[0].freq(deep=True)
(343283971893, 2000)
For a deep set the call operator will return the value acknowledged
      by the device, which is important e.g. for floating point values with a
limited resolution.
>>> nodetree.demods[0].freq(29999,99999, deep=True)
3000
Warning:
        The deep flag does not work for wildcard nodes or non-leaf nodes since
they represent multiple nodes that are set in a transactional set which
does not report the acknowledged values.
* enum: Flag if enumerated values should return the enum value as
string. (default = True)
* parse: Flag if the SetParser/GetParser from the Node, if present,
should be applied or not (default = True).
      The parsers are hard-coded lambda functions that are not provided by
      LabOne but need to be set manually (e.g. toolkit adds these for a
      selected set of nodes). The lambda function gets called right
before/after the API call to LabOne. Usually they are used to add
additional limitations and improve error reporting but in theory they
can be used for anything.
To add a parser to a node use the ``NodeTree.update_node`` function.
>>> nodetree.update_node(
"/dev1234/demods/0/freq",
{
"GetParser": lambda v: print(f"got {v} from LabOne") return v,
"SetParser": lambda v: print(f"set {v} to LabOne") return v,
},
)
In addition to the call operator the following magic methods are
implemented:
* __contains__
>>> "freq" in nodetree.demods[0]
True
* __iter__
>>> for node, info in nodetree.demods["*"].freq
* __eq__
>>> nodetree.demods[0].freq == nodetree.demods["*/freq"]
* __len__ (only implemented for list like nodes)
>>> len(nodetree.demods)
4
    * __bool__ tests if the node is an existing node
>>> if nodetree.demods[0].freq:
...
    * __hash__ (e.g. necessary to be able to use nodes as keys in dictionaries)
Args:
root: Root of the nodetree.
tree: Tree (node path as tuple) of the current node.
"""
def __init__(self, root: "NodeTree", tree: tuple):
self._root = root
self._tree = tree
self._is_valid: t.Optional[bool] = None
def __getattr__(self, name) -> "Node":
return Node(self._root, self._tree + (name,))
def __getitem__(self, name) -> "Node":
name = str(name).lower()
if "/" in name:
name_list = name.split("/")
if name_list[0]:
return Node(self._root, self._tree + (*name_list,))
return Node(self._root, self._tree + (*name_list[1:],))
return Node(self._root, self._tree + (name,))
def __contains__(self, k):
return k in self._next_layer
def __iter__(self):
for child_node, info in self._root:
if self.is_child_node(child_node):
yield child_node, info
def __repr__(self):
return self.node_info.path
def __dir__(self):
dir_info = list(self._next_layer)
for var, value in vars(self.__class__).items():
if (
isinstance(value, property)
and not var.startswith("_")
and var not in dir_info
):
dir_info.append(var)
return dir_info
def __eq__(self, other):
        # built-in keywords are escaped with a trailing underscore
# (https://pep8.org/#descriptive-naming-styles)
own_node_list = tuple(node.rstrip("_") for node in self._tree)
other_node_list = tuple(node.rstrip("_") for node in other.raw_tree)
return own_node_list == other_node_list and self._root is other._root
def __hash__(self):
own_node_list = tuple(node.rstrip("_") for node in self._tree)
if not own_node_list:
own_node_list = "Node"
return hash((own_node_list, repr(self._root)))
def __bool__(self):
return self.is_valid()
def __len__(self):
if not self._is_list():
raise TypeError(f"Node {self.node_info.path} is not a list")
return len(self._next_layer)
def __call__(
self, value: t.Any = None, *, deep=False, enum=True, parse=True, **kwargs
) -> t.Any:
"""Call operator that either gets (empty) or gets the value of a node.
Args:
value: Optional value that should be set to the node. If not
specified the operator will return the value of the node
instead.
deep: Flag if the operation should block until the device has
acknowledged the operation. The operation returns the value
acknowledged by the device. This takes significantly longer
than a normal operation and should be used carefully.
enum: Flag if enumerated values should return the enum value as
string or return the raw number.
parse: Flag if the GetParser or SetParser, if present, should be
applied or not.
Returns:
Value of the node for a get operation. If the deep flag is set the
acknowledged value from the device is returned (applies also for
the set operation).
.. versionchanged:: 0.3.5
Returns `WildcardResult` when wildcards are used in
getting values.
.. versionchanged:: 0.5.0 Returns NodeDict instead of WildcardResult
.. versionchanged:: 0.6.1 Returns an enum on keywords nodes also
for deep gets.
Raises:
AttributeError: If the connection does not support the necessary
function to get/set the value.
            RuntimeError: If self.node_info.type is one of the following:
[ZIPWAWave, ZITriggerSample, ZICntSample, ZIImpedanceSample,
ZIScopeWave, ZIAuxInSample]. The reason is that these nodes can
only be polled.
TypeError: if the deep command is not available for this node
(e.g. sample nodes)
KeyError: If the node does not resolve to at least one valid leaf
node.
"""
if value is None:
return self._get(deep=deep, enum=enum, parse=parse, **kwargs)
return self._set(value, deep=deep, enum=enum, parse=parse, **kwargs)
@lazy_property
def _next_layer(self) -> t.Set[str]:
"""A set of direct child nodes."""
next_layer = set()
for node, _ in self:
next_layer.add(node.raw_tree[len(self._tree)])
return next_layer
def _is_list(self) -> bool:
"""Checks if the node is a list type."""
return len(self._next_layer) > 0 and next(iter(self._next_layer)).isdecimal()
def _resolve_wildcards(self) -> t.List[str]:
"""Resolves potential wildcards.
        Also resolves partial nodes to their leaf nodes.
Returns:
List of matched nodes in the raw path format
"""
return resolve_wildcards_labone(
self._root.node_to_raw_path(self), self._root.raw_dict.keys()
)
def _parse_get_value(
self, value: t.Any, enum: bool = True, parse: bool = True
) -> t.Any:
"""Parse the raw value from the data server.
Args:
value: Raw value from the data server.
enum: Flag if enumerated values should return the enum value as a
string or as a raw number.
parse: Flag if the GetParser, if present, should be applied or not.
Returns:
Parsed value
"""
if enum and isinstance(value, (int, np.integer)):
try:
value = self.node_info.enum(value) if self.node_info.enum else value
except ValueError:
                # If the value is not part of the enum, keep the raw value
                # instead of raising an exception.
pass
if parse:
value = self.node_info.get_parser(value)
return value
def _get(
self, deep: bool = False, enum: bool = True, parse: bool = True, **kwargs
) -> t.Any:
"""Get the value from the node.
The kwargs will be forwarded to the mapped zhinst.core function call.
Args:
deep: Flag if the get operation should return the cached value
from the Data Server or get the value from the device, which is
significantly slower.
enum: Flag if enumerated values should return the enum value as
string or return the raw number.
parse: Flag if the GetParser, if present, should be applied or not.
        Returns:
            value(s) from the device. If multiple values match the node, a
            dictionary of the child nodes and their values is returned. If the
``deep`` flag is set the value is a pair (timestamp, value) instead.
Raises:
AttributeError: if the connection does not support the necessary
                function to get the value.
            RuntimeError: If self.node_info.type is one of the following:
[ZIPWAWave, ZITriggerSample, ZICntSample, ZIImpedanceSample,
ZIScopeWave, ZIAuxInSample]. The reason is that these nodes can
only be polled.
TypeError: if the deep command is not available for this node
(e.g. sample nodes) or connection object.
KeyError: If the node does not resolve to at least one valid leaf
node.
"""
readable = self.node_info.readable
if readable:
timestamp = None
if deep:
timestamp, value = self._get_deep(**kwargs)
else:
value = self._get_cached(**kwargs)
value = self._parse_get_value(value, enum=enum, parse=parse)
return (timestamp, value) if deep else value
if readable is None and (
self.node_info.contains_wildcards or self.node_info.is_partial
):
return self._get_wildcard(deep=deep, enum=enum, parse=parse, **kwargs)
if readable is False:
raise AttributeError(f"{self.node_info.path} is not readable.")
raise KeyError(self.node_info.path)
@staticmethod
def _parse_get_entry(raw_value: t.Dict[t.Union[str, int], t.Any]):
"""Parser for the get function of zhinst.core.
        The get function in zhinst.core supports multiple values and returns the
        results as an OrderedDict. This function parses the value entry of that
        dictionary into a (timestamp, value) pair.
        Args:
            raw_value: OrderedDict from the zhinst.core get command.
Returns:
(timestamp, value) pair.
"""
value = None
timestamp = None
try:
value = raw_value["value"][0]
timestamp = raw_value["timestamp"][0]
except TypeError:
            # ZIVectorData has a different structure
value = raw_value[0]
if isinstance(value, dict):
timestamp = value["timestamp"]
value = value["vector"]
except IndexError:
            # HF2 has no timestamp
value = raw_value[0]
except KeyError:
            # HF2 returns sample nodes as well but we don't parse them
value = raw_value
return (timestamp, value)
def _get_wildcard(
self, deep=True, enum=True, parse=True, **kwargs
) -> t.Union[NodeDict, t.Dict[str, t.Any]]:
"""Execute a wildcard get.
The get is performed as a deep get (for all devices except HF2)
regardless of the ``deep`` flag. If the ``deep`` flag is not set the
        timestamp is removed to ensure consistency.
        The kwargs will be forwarded to the mapped zhinst.core function call.
Args:
deep: Flag if the get operation should return the cached value
from the Data Server or get the value from the device, which is
significantly slower. The wildcard get is always performed as
deep get but the timestamp is only returned if the ``deep```
flag is set.
enum: Flag if enumerated values should return the enum value as
string or return the raw number.
parse: Flag if the GetParser, if present, should be applied or not.
Returns:
            ``NodeDict`` mapping the matched child nodes to their values. If the
            ``deep`` flag is set the values are (timestamp, value) pairs. If
            ``flat=False`` is passed the raw nested dictionary is returned.
Raises:
KeyError: If the node does not resolve to at least one valid leaf
node.
"""
        # modules don't have a settingsonly argument; this is caught in a
        # try/except block to avoid unnecessary comparisons
kwargs.setdefault("settingsonly", False)
kwargs.setdefault("flat", True)
try:
result_raw = self._root.connection.get(self.node_info.path, **kwargs)
except TypeError:
del kwargs["settingsonly"]
result_raw = self._root.connection.get(self.node_info.path, **kwargs)
if not result_raw:
raise KeyError(self.node_info.path)
if not kwargs["flat"]:
return result_raw
result = {}
for sub_node_raw, node_value in result_raw.items():
sub_node = self._root.raw_path_to_node(sub_node_raw)
timestamp, value = self._parse_get_entry(node_value)
value = sub_node._parse_get_value(value, enum=enum, parse=parse)
# although the operation is a deep get we hide the timestamp
# to ensure consistency
result[sub_node_raw] = (timestamp, value) if deep else value
return NodeDict(result)
def _get_deep(self, **kwargs) -> t.Tuple[int, t.Any]:
"""Get the node value from the device.
        The kwargs will be forwarded to the mapped zhinst.core function call.
        Note: The HF2 does not support the timestamp option and will therefore
return None for the timestamp.
Returns:
(timestamp, value) from the device.
Raises:
TypeError: if the deep command is not available for this node
(e.g. sample nodes)
"""
kwargs.setdefault("settingsonly", False)
# Flat must be set to True (if customers want the flat option they need
# to call zhinst.core directly)
kwargs["flat"] = True
raw_dict = self._root.connection.get(self.node_info.path, **kwargs)
if not raw_dict or len(raw_dict) == 0:
raise TypeError(
"keyword 'deep' is not available for this node. "
"(e.g. node is a sample node)"
)
raw_value = next(iter(raw_dict.values()))
return self._parse_get_entry(raw_value)
def _get_cached(self, **kwargs) -> t.Any:
"""Get the cached node value from the data server.
        The kwargs will be forwarded to the mapped zhinst.core function call.
Returns:
Cached node value from the data server.
Raises:
AttributeError: if the connection does not support the necessary
                function to get the value.
            RuntimeError: If self.node_info.type is one of the following:
[ZIPWAWave, ZITriggerSample, ZICntSample, ZIImpedanceSample,
ZIScopeWave, ZIAuxInSample]. The reason is that these nodes can
only be polled.
"""
if "Integer" in self.node_info.type:
return self._root.connection.getInt(self.node_info.path)
if self.node_info.type == "Double":
return self._root.connection.getDouble(self.node_info.path)
if self.node_info.type == "String":
return self._root.connection.getString(self.node_info.path)
if self.node_info.type == "ZIVectorData":
_, value = self._get_deep(**kwargs)
return value
if self.node_info.type == "Complex Double":
return self._root.connection.getComplex(self.node_info.path, **kwargs)
if self.node_info.type == "ZIDemodSample":
return self._root.connection.getSample(self.node_info.path, **kwargs)
if self.node_info.type == "ZIDIOSample":
return self._root.connection.getDIO(self.node_info.path, **kwargs)
if self.node_info.type == "ZIAdvisorWave":
raw_value = self._root.connection.get(self.node_info.path, flat=True)
return next(iter(raw_value.values()))[-1]
raise RuntimeError(
f"{self.node_info.path} has type {self.node_info.type} and can "
"only be polled."
)
def _set(
self, value: t.Any, deep=False, enum=True, parse=True, **kwargs
) -> t.Optional[t.Any]:
"""Set the value to the node.
        The kwargs will be forwarded to the mapped zhinst.core function call.
Args:
value: value
deep: Flag if the set operation should be blocking until the data
has arrived at the device. (default=False)
enum: Flag if enumerated values should accept the enum value as
string. (default=True)
parse: Flag if the SetParser, if present, should be applied or not.
(default=True)
Returns:
Acknowledged value on the device if ``deep`` flag is set (does not
            work for wildcard or non-leaf nodes since they are bundled in a
transaction).
Raises:
AttributeError: If the connection does not support the necessary
                function to set the value.
KeyError: if the wildcard does not resolve to a valid node
RuntimeError: if deep set is not possible
TypeError: Connection does not support deep set
"""
writable = self.node_info.writable
if writable or self.node_info.contains_wildcards:
if parse:
value = self.node_info.set_parser(value)
if self._root.transaction.in_progress():
self._root.transaction.add(self, value)
elif deep:
return self._parse_get_value(
self._set_deep(value, **kwargs), enum=enum, parse=parse
)
else:
try:
self._root.connection.set(self.node_info.path, value, **kwargs)
except RuntimeError:
                    # Some vector nodes do not support the set command.
if self.node_info.type == "ZIVectorData":
self._root.connection.setVector(
self.node_info.path, value, **kwargs
)
else:
raise
return None
if self.node_info.is_partial:
return self["*"](value, deep=deep, enum=enum, parse=parse, **kwargs)
if writable is False:
raise AttributeError(f"{self.node_info.path} is read-only.")
raise KeyError(self.node_info.path)
def _set_deep(self, value: t.Any, **kwargs) -> t.Any:
"""Set the node value from device.
The kwargs will be forwarded to the mapped zhinst.core function call.
Args:
value: value
Returns:
            Acknowledged value on the device. Only if a deep set operation is
available for the connection object.
Raises:
RuntimeError: if deep set is not possible
TypeError: Connection does not support deep set
"""
try:
if isinstance(value, numbers.Integral):
return self._root.connection.syncSetInt(
self.node_info.path, value, **kwargs
)
if isinstance(value, numbers.Real):
return self._root.connection.syncSetDouble(
self.node_info.path, value, **kwargs
)
if isinstance(value, str):
return self._root.connection.syncSetString(
self.node_info.path, value, **kwargs
)
except TypeError as error:
raise TypeError(
"deep set is not supported for this connection."
"(this likely cause because the connection is a module and a deep "
"set does not make sense there.)"
) from error
raise RuntimeError(
f"Invalid type {type(value)} for deep set "
"(only int,float and str are supported)"
)
def is_child_node(self, child_node: "Node") -> bool:
"""Checks if a node is child node of this node.
Args:
child_node: Potential child node
Returns:
            Boolean if the passed node is a child node
"""
return fnmatch.fnmatchcase(
"/".join(child_node.raw_tree), "/".join(self.raw_tree) + "*"
)
def wait_for_state_change(
self,
value: t.Union[int, str, NodeEnum],
*,
invert: bool = False,
timeout: float = 2,
sleep_time: float = 0.005,
) -> None:
"""Waits until the node has the expected state/value.
Warning:
            Only supports integer nodes. (The value can either be the integer
            itself or its corresponding enum keyword as a string.)
Args:
value: Expected value of the node.
                .. versionchanged:: 0.6.1 Enums or strings are accepted for keyword nodes.
invert: Instead of waiting for the value, the function will wait for
any value except the passed value instead. (default = False)
Useful when waiting for value to change from existing one.
timeout: Maximum wait time in seconds. (default = 2)
sleep_time: Sleep interval in seconds. (default = 0.005)
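        Example:
            A minimal sketch, assuming the demodulator node below exists on
            the device:
            >>> nodetree.demods[0].enable(1)
            >>> nodetree.demods[0].enable.wait_for_state_change(1, timeout=5)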
Raises:
TimeoutError: Timeout exceeded.
"""
if self.node_info.contains_wildcards:
start_time = time.time()
nodes_raw = self._resolve_wildcards()
if not nodes_raw:
raise KeyError(self.node_info.path)
for node_raw in nodes_raw:
node = self._root.raw_path_to_node(node_raw)
node.wait_for_state_change(
value,
invert=invert,
timeout=max(0, timeout - (time.time() - start_time)),
sleep_time=sleep_time,
)
else:
            # If the node is a keyword node (has an enum defined) and the value
            # is a string, convert it to the numeric value
if self.node_info.enum and isinstance(value, str):
enum_value = self.node_info.enum[value]
parsed_value = self._parse_get_value(enum_value)
else:
parsed_value = self._parse_get_value(value)
start_time = time.time()
# Performs a deep get to avoid waiting on stale values from cache
            # In the loop a shallow get is sufficient, since a deep get on
            # every iteration would be needlessly slow.
curr_value = self._get(deep=True)[1]
while start_time + timeout >= time.time():
# Verify if we get to the correct value.
# If yes, exit the function.
if (curr_value == parsed_value) != invert:
return
time.sleep(sleep_time)
curr_value = self._get(deep=False)
# In case of timeout, raise the correct error
# If the user passed a string or enum, uses it for the error report
if isinstance(value, (str, NodeEnum)):
value = repr(parsed_value.name)
curr_value = repr(curr_value.name)
if invert:
raise TimeoutError(
f"{self.node_info.path} did not change from the expected"
f" value {value} within {timeout}s."
)
raise TimeoutError(
f"{self.node_info.path} did not change to the expected value"
f" within {timeout}s. {value} != {curr_value}"
)
def subscribe(self) -> None:
"""Subscribe to this node (its child lead nodes).
To get data from data from the subscribed nodes use the poll command
(provided by the Connection).
In order to avoid fetching old data that is still in the buffer execute
a sync command before subscribing to new data streams.
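        Example:
            A minimal sketch, assuming ``daq`` is the underlying
            ``zhinst.core`` session this node tree is connected through:
            >>> nodetree.demods[0].sample.subscribe()
            >>> data = daq.poll(0.1, 50, 0, True)
            >>> nodetree.demods[0].sample.unsubscribe()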
"""
try:
self._root.connection.subscribe(self.node_info.path)
except RuntimeError as error:
raise KeyError(self.node_info.path) from error
def unsubscribe(self) -> None:
"""Unsubscribe this node (its child lead nodes).
Use this command after recording to avoid buffer overflows that may
increase the latency of subsequent commands.
"""
try:
self._root.connection.unsubscribe(self.node_info.path)
except RuntimeError as error:
raise KeyError(self.node_info.path) from error
def get_as_event(self) -> None:
"""Trigger an event for that node (its child lead nodes).
The node data is returned by a subsequent poll command.
"""
try:
self._root.connection.getAsEvent(self.node_info.path)
except RuntimeError as error:
raise KeyError(self.node_info.path) from error
def child_nodes(
self,
*,
recursive: bool = False,
leavesonly: bool = False,
settingsonly: bool = False,
streamingonly: bool = False,
subscribedonly: bool = False,
basechannelonly: bool = False,
excludestreaming: bool = False,
excludevectors: bool = False,
) -> t.Generator["Node", None, None]:
"""Generator for all child nodes that matches the filters.
        If the node does not contain any child nodes the generator will only
contain the node itself (if it matches the filters).
Warning:
The Data Server supports only the asterisk wildcard. For all
other wildcards the matching child nodes can be generated manually
within the nodetree.
            Enabling additional flags however requires that each node that
            matches the wildcard is checked in a separate request by
            the Data Server, which can cause severe latency. Therefore one
needs to enable the `full_wildcard` flag in order to support the
manual generation of the matching child nodes.
Examples:
>>> child_nodes = nodetree.demods[0].child_nodes()
>>> next(child_nodes)
/dev1234/demods/0/freq
Args:
recursive: Returns the nodes recursively (default: False)
leavesonly: Returns only nodes that are leaves, which means
they are at the outermost level of the tree (default: False).
settingsonly: Returns only nodes which are marked as setting
(default: False).
streamingonly: Returns only streaming nodes (default: False).
subscribedonly: Returns only subscribed nodes
(default: False).
basechannelonly: Return only one instance of a node in case
of multiple channels (default: False).
excludestreaming: Exclude streaming nodes (default: False).
excludevectors: Exclude vector nodes (default: False).
Returns:
Generator of all child nodes that match the filters
"""
raw_path = self._root.node_to_raw_path(self)
raw_result = self._root.connection.listNodes(
raw_path,
recursive=recursive,
leavesonly=leavesonly,
settingsonly=settingsonly,
streamingonly=streamingonly,
subscribedonly=subscribedonly,
basechannelonly=basechannelonly,
excludestreaming=excludestreaming,
excludevectors=excludevectors,
)
for node_raw in raw_result:
yield self._root.raw_path_to_node(node_raw)
def is_valid(self) -> bool:
"""Check if the node is a valid node.
        A valid node resolves to at least one existing node in the node
        tree. This means not only leaf nodes are valid nodes but also
        partial nodes and nodes containing wildcards.
Returns:
Flag if the node is a valid node
"""
if self._is_valid is None:
keys = self._resolve_wildcards()
self._is_valid = len(keys) > 0
return self._is_valid
@property
def node_info(self) -> NodeInfo:
"""Additional information about the node."""
return self.root.get_node_info(self)
@property
def raw_tree(self) -> t.Tuple[str, ...]:
"""Internal representation of the node."""
return self._tree
@property
def root(self) -> "NodeTree":
"""Node tree to which this node belongs to."""
return self._root
class NodeList(Sequence, Node):
"""List of nodelike objects.
    A plain list of preinitialized classes that inherit from the ``Node`` class
    would not support wildcards since it would be of type list.
    This class holds the preinitialized objects. But if the passed item is not
    an integer it returns a Node instead.
Warning:
Since in case of a passed wildcard symbol the return value is a node,
the additional functionality that the nodelike object may provide
        (e.g. helper functions) is no longer accessible.
Args:
elements: Preinitialized child elements
root: Root of the nodetree
tree: Node tree (node path as tuple) of the current node
"""
def __init__(self, elements: t.Sequence[t.Any], root: "NodeTree", tree: tuple):
Sequence.__init__(self)
Node.__init__(self, root, tree)
self._elements: t.Sequence[t.Any] = elements
@t.overload
def __getitem__(
self, idx: t.Union[int, str]
) -> t.Union[t.Any, Node]: # pragma: no cover
...
@t.overload
def __getitem__(
self, s: slice
) -> t.Sequence[t.Union[t.Any, Node]]: # pragma: no cover
...
def __getitem__(self, item):
        # Use a numpy check here to ensure numpy types are handled correctly (#252)
if np.issubdtype(type(item), np.integer):
return self._elements[item]
return Node(self._root, self._tree + (str(item),))
def __len__(self):
return len(self._elements)
def __eq__(self, other):
return Node.__eq__(self, other)
def __hash__(self):
return Node.__hash__(self) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/nodetree/node.py | node.py |
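

# Illustrative sketch (not part of the library): how ``NodeList`` behaves for
# different key types. ``device`` is an assumed, already connected device whose
# ``awgs`` attribute is a ``NodeList``.
def _example_node_list(device):  # pragma: no cover - illustrative only
    helper = device.awgs[0]  # integer key -> preinitialized helper object
    plain = device.awgs["*"]  # any other key -> plain Node (wildcard here)
    return helper, plain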
import typing as t
from contextlib import contextmanager
from collections.abc import Mapping
import re
from _thread import RLock # type: ignore
# TypedDict is available in the typing module since 3.8
# Once we only support 3.8 and above we should switch to t.TypedDict
from typing_extensions import TypedDict
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.nodetree.node import Node
T = t.TypeVar("T")
_NodeInfo = TypedDict(
"_NodeInfo",
{
"Node": str,
"Description": str,
"Properties": str,
"Type": str,
"Unit": str,
"Options": t.Dict[str, str],
},
)
NodeDoc = t.Dict[str, _NodeInfo]
_NOT_FOUND = object()
# Exact implementation of functools.cached_property from Python 3.8
# This is needed for Python 3.7 compatibility
# It should be removed once we drop support for Python 3.7
class lazy_property:
"""Copied functools.cached_property from Python 3.8.
Decorator that converts a method with a single self argument into a
property cached on the instance.
"""
def __init__(self, func):
self.func = func
self.attrname = None
self.__doc__ = func.__doc__
self.lock = RLock()
def __set_name__(self, owner, name):
if self.attrname is None:
self.attrname = name
elif name != self.attrname:
raise TypeError(
"Cannot assign the same cached_property to two different names "
f"({self.attrname!r} and {name!r})."
)
def __get__(self, instance, owner=None):
if instance is None:
return self
if self.attrname is None:
raise TypeError(
"Cannot use cached_property instance without "
"calling __set_name__ on it."
)
try:
cache = instance.__dict__
except AttributeError: # not all objects have __dict__
msg = (
f"No '__dict__' attribute on {type(instance).__name__!r} "
f"instance to cache {self.attrname!r} property."
)
raise TypeError(msg) from None
val = cache.get(self.attrname, _NOT_FOUND)
if val is _NOT_FOUND:
with self.lock:
# check if another thread filled cache while we awaited lock
val = cache.get(self.attrname, _NOT_FOUND)
if val is _NOT_FOUND:
val = self.func(instance)
try:
cache[self.attrname] = val
except TypeError:
msg = (
f"The '__dict__' attribute on {type(instance).__name__!r} "
"instance does not support item assignment for caching "
f"{self.attrname!r} property."
)
raise TypeError(msg) from None
return val
@contextmanager
def create_or_append_set_transaction(nodetree) -> t.Generator[None, None, None]:
"""Context manager for a transactional set.
In contrast to the set_transaction from the nodetree this function only
creates a new transaction if no other is in progress.
Should only be called within the toolkit code.
Warning:
This function will silently fail if the existing transaction is exited
before this function finishes.
Warning:
The set is always performed as deep set if called on device nodes.
Examples:
>>> with nodetree.set_transaction():
nodetree.test[0].a(1)
with create_or_append_set_transaction(nodetree):
nodetree.test[1].a(2)
nodetree.test[2].a(2)
"""
if not nodetree.transaction.in_progress():
with nodetree.set_transaction():
yield
else:
yield
def resolve_wildcards_labone(path: str, nodes: t.List[str]) -> t.List[str]:
"""Resolves potential wildcards.
Partial nodes will also be resolved to their leaf nodes.
Args:
path: Node path that may contain wildcards.
nodes: List of raw node paths to match against.
Returns:
List of matched nodes in the raw path format
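Example:
A short sketch of the matching (paths are assumptions):
>>> resolve_wildcards_labone(
...     "/dev1234/demods/*/freq",
...     ["/dev1234/demods/0/freq", "/dev1234/demods/0/rate"],
... )
['/dev1234/demods/0/freq']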
"""
node_raw = re.escape(path)
node_raw = node_raw.replace("/\\*/", "/[^/]*/").replace("/\\*", "/*") + "(/.*)?$"
node_raw_regex = re.compile(node_raw)
return list(filter(node_raw_regex.match, nodes))
class NodeDict(Mapping):
"""Mapping of dictionary structure results.
The mapping allows accessing the data with both strings and toolkit
node objects.
Args:
result: A dictionary of node/value pairs.
Example:
>>> result = device.demods["*"].enable()
>>> print(result)
{
'/dev1234/demods/0/enable': 0,
'/dev1234/demods/1/enable': 1,
}
>>> result[device.demods[0].enable]
0
>>> result["/dev1234/demods/0/enable"]
0
.. versionadded:: 0.3.5 Renamed from WildcardResult
"""
def __init__(self, result: t.Dict[str, t.Any]):
self._result = result
def __repr__(self):
return repr(self._result)
def __getitem__(self, key: t.Union[str, "Node"]):
return self._result[str(key)]
def __iter__(self):
return iter(self._result)
def __len__(self):
return len(self._result)
def to_dict(self) -> t.Dict[str, t.Any]:
"""Convert the WildcardResult to a dictionary.
After conversion, :class:`Node` objects cannot be used to get items.
"""
return self._result | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/nodetree/helper.py | helper.py |
import fnmatch
import json
import re
import typing as t
from collections import OrderedDict
from numpy import array
from zhinst.toolkit.nodetree.helper import NodeDoc
from zhinst.toolkit.exceptions import ToolkitError
class ConnectionDict:
"""Connection wrapper around a dictionary.
The ``NodeTree`` expects a connection that complies with the
protocol :class:`nodetree.Connection`. In order to also support raw
dictionaries this class wraps around a Python dictionary and exposes the
required protocol.
Args:
data: Dictionary raw path: value
json_info: JSON information for each path (path: info)
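Example:
A minimal sketch (paths and values are assumptions):
>>> data = {"/a/b": 1}
>>> json_info = {"/a/b": {"Node": "/a/b", "Type": "Integer (64 bit)"}}
>>> connection = ConnectionDict(data, json_info)
>>> connection.getInt("/a/b")
1
>>> connection.set("/a/b", 2)
>>> connection.getInt("/a/b")
2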
"""
def __init__(self, data: t.Dict[str, t.Any], json_info: NodeDoc):
super().__init__()
self._values = data
self.json_info = json_info
def _get_value(self, path: str) -> t.Any:
"""Return the value for a given path.
If the value is callable it is called.
Args:
path: Key in the internal values dictionary.
Returns:
The value of the given path.
"""
value = self._values[path]
if callable(value):
return value()
return value
def _resolve_wildcards(self, path: str) -> t.List[str]:
path_raw = path.replace("/\\*/", "/[^/]*/")
path_raw_regex = re.compile(path_raw)
return list(filter(path_raw_regex.match, self._values.keys()))
def _set_value(self, path: str, value: t.Any) -> None:
"""Set the value for a given path.
If the value is callable it is called with the new value.
Args:
path: Key in the internal values dictionary.
value: New value of the path.
"""
paths = self._resolve_wildcards(path)
if not paths:
raise KeyError(path)
for path in paths:
self._do_set_value(path, value)
def _do_set_value(self, path: str, value: t.Any) -> None:
if callable(self._values[path]):
self._values[path](value)
else:
self._values[path] = value
def listNodesJSON(self, path: str, *args, **kwargs) -> str:
"""Returns a list of nodes with description found at the specified path."""
if path == "*":
return json.dumps(self.json_info)
json_info = {}
for node, info in self.json_info.items():
if fnmatch.fnmatchcase(node, path + "*"):
json_info[node] = info
return json.dumps(json_info)
def get(self, path: str, *args, **kwargs) -> t.Any:
"""Mirrors the behavior of zhinst.core get command."""
nodes_raw = fnmatch.filter(self._values.keys(), path)
if not nodes_raw:
nodes_raw = fnmatch.filter(self._values.keys(), path + "*")
return_value = OrderedDict()
for node in nodes_raw:
return_value[node] = array([self._get_value(node)])
return return_value
def getInt(self, path: str) -> int:
"""Mirrors the behavior of zhinst.core getInt command."""
try:
return int(self._get_value(path))
except TypeError:
if self._get_value(path) is None:
return 0
raise
def getDouble(self, path: str) -> float:
"""Mirrors the behavior of zhinst.core getDouble command."""
return float(self._get_value(path))
def getString(self, path: str) -> str:
"""Mirrors the behavior of zhinst.core getDouble command."""
return str(self._get_value(path))
def _parse_input_value(self, path: str, value: t.Any):
if isinstance(value, str):
option_map = {}
for key, option in self.json_info[path].get("Options", {}).items():
node_options = re.findall(r'"(.+?)"[,:]+', option)
option_map.update({x: int(key) for x in node_options})
return option_map.get(value, value)
return value
def set(
self,
path: t.Union[str, t.List[t.Tuple[str, t.Any]]],
value: t.Any = None,
**kwargs,
) -> None:
"""Mirrors the behavior of zhinst.core set command."""
if isinstance(path, str):
self._set_value(path, self._parse_input_value(path, value))
else:
for node, node_value in path:
self._set_value(node, self._parse_input_value(node, node_value))
def setVector(self, path: str, value: t.Any = None) -> None:
"""Mirrors the behavior of zhinst.core setVector command."""
self.set(path, value)
def subscribe(self, path: str) -> None:
"""Mirrors the behavior of zhinst.core subscribe command."""
raise ToolkitError("Can not subscribe within the SHFQA_Sweeper")
def unsubscribe(self, path: str) -> None:
"""Mirrors the behavior of zhinst.core unsubscribe command."""
raise ToolkitError("Can not subscribe within the SHFQA_Sweeper") | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/nodetree/connection_dict.py | connection_dict.py |
import fnmatch
import json
import typing as t
from keyword import iskeyword as is_keyword
# Protocol is available in the typing module since 3.8
# Once we only support 3.8+ we should switch to t.Protocol
from typing_extensions import Protocol
from contextlib import contextmanager
from zhinst.toolkit.nodetree.helper import NodeDoc, _NodeInfo
from zhinst.toolkit.nodetree.node import Node, NodeInfo
from zhinst.toolkit.exceptions import ToolkitError
class Connection(Protocol):
"""Protocol class for the connection used in the nodetree.
Every connection object used in the Nodetree is expected to have at least
this interface in order to work with the Nodetree.
"""
# pylint: disable=invalid-name
def listNodesJSON(self, path: str, *args, **kwargs) -> str:
"""Returns a list of nodes with description found at the specified path."""
def get(self, path: str, *args, **kwargs) -> object:
"""Mirrors the behavior of zhinst.core ``get`` command."""
def getInt(self, path: str) -> int:
"""Mirrors the behavior of zhinst.core ``getInt`` command."""
def getDouble(self, path: str) -> float:
"""Mirrors the behavior of zhinst.core ``getDouble`` command."""
def getString(self, path: str) -> str:
"""Mirrors the behavior of zhinst.core ``getDouble`` command."""
@t.overload
def set(self, path: str, value: t.Any) -> None:
"""Mirrors the behavior of zhinst.core ``set`` command."""
@t.overload
def set(self, path: t.List[t.Tuple[str, t.Any]]) -> None:
"""Mirrors the behavior of zhinst.core ``set`` command."""
def set(self, path, value=None) -> None:
"""Mirrors the behavior of zhinst.core ``set`` command."""
def subscribe(self, path: str) -> None:
"""Mirrors the behavior of zhinst.core ``subscribe`` command."""
def unsubscribe(self, path: str) -> None:
"""Mirrors the behavior of zhinst.core ``unsubscribe`` command."""
class Transaction:
"""Transaction Manager.
Buffers commands (node, value pairs)
Args:
nodetree: Underlying Nodetree
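Example:
A minimal sketch of the buffering behavior (the ``nodetree`` instance
and node path are assumptions):
>>> transaction = Transaction(nodetree)
>>> transaction.start()
>>> transaction.add("/dev1234/demods/0/enable", 1)
>>> transaction.result()
[('/dev1234/demods/0/enable', 1)]
>>> transaction.stop()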
"""
def __init__(self, nodetree: "NodeTree"):
self._queue: t.Optional[t.List[t.Tuple[str, t.Any]]] = None
self._root = nodetree
self._add_callback: t.Optional[t.Callable[[str, t.Any], None]] = None
def start(
self, add_callback: t.Optional[t.Callable[[str, t.Any], None]] = None
) -> None:
"""Start the transaction.
Args:
add_callback: Callback to be called whenever a node value pair has
been added to the queue. Only valid for the started
transaction.
Raises:
ToolkitError: A transaction is already in progress.
.. versionchanged:: 0.4.0
add_callback added.
"""
if self.in_progress():
raise ToolkitError(
"A transaction is already in progress. Only one transaction is "
"possible at a time."
)
self._queue = []
self._add_callback = add_callback
def stop(self) -> None:
"""Stop the transaction."""
self._queue = None
self._add_callback = None
def add(self, node: t.Union[Node, str], value: t.Any) -> None:
"""Adds a single node set command to the set transaction.
Args:
node: Node object or string representing the node.
value: Value that should be set to the node.
Raises:
AttributeError: If no transaction is in progress.
ValueError: If the node is passed as a string in form of a relative
path and no prefix can be added.
"""
try:
self._queue.append( # type: ignore[union-attr]
(self._root.to_raw_path(node), value)
)
if self._add_callback:
self._add_callback(*self._queue[-1]) # type: ignore[index]
except AttributeError as exception:
raise AttributeError("No set transaction is in progress.") from exception
def in_progress(self) -> bool:
"""Flag if the transaction is in progress."""
return self._queue is not None
def result(self) -> t.Optional[t.List[t.Tuple[str, t.Any]]]:
"""Resulting transaction list.
Returns:
List of all added node value pairs.
"""
return self._queue
class NodeTree:
"""High-level generic lazy node tree.
All interactions with a Zurich Instruments device or a LabOne
module happen through manipulating nodes. The ``NodeTree`` provides a
pythonic way for that.
It reads all available nodes and their additional information from the
provided connection and makes them available in a nested dictionary-like
interface. The interface also supports accessing the nodes by attribute.
>>> nodetree = NodeTree(connection)
>>> nodetree.example.nodes[8].test
/example/nodes/8/test
To speed up the initialization the node tree is initialized lazily,
meaning the dictionary is kept flat and is not converted
into a nested one. In addition the nested node objects returned by the
``NodeTree`` are just simple placeholders. Only when performing
operations on a node is its validity checked and the calls translated to
the correct node string. (For more information on how to manipulate nodes
refer to :class:`zhinst.toolkit.nodetree.node.Node`).
Examples:
>>> nodetree = NodeTree(daq)
>>> nodetree.dev123.demods[0].freq
/dev123/demods/0/freq
>>> nodetree = NodeTree(daq, prefix_hide = "dev123", list_nodes = ["/dev123/*"])
>>> nodetree.demods[0].freq
/dev123/demods/0/freq
Args:
connection: Underlying connection for the node tree. All
operations are converted into calls to that connection.
prefix_hide: Prefix, e.g. device id, that should be hidden in the
nodetree. (Hidden means that users do not need to specify it and it
will be added automatically to the nodes if necessary)
(default = None)
list_nodes: List of nodes that should be downloaded from the connection.
By default all available nodes are downloaded. (default = None)
preloaded_json: Optional preloaded node information.
(e.g for the HF2 that does not support the `listNodesJson` function)
"""
def __init__(
self,
connection: Connection,
prefix_hide: t.Optional[str] = None,
list_nodes: t.Optional[list] = None,
preloaded_json: t.Optional[NodeDoc] = None,
):
self._prefix_hide = prefix_hide.lower() if prefix_hide else None
self._connection = connection
if not list_nodes:
list_nodes = ["*"]
self._flat_dict: NodeDoc = {}
if preloaded_json:
self._flat_dict = preloaded_json
else:
for element in list_nodes:
nodes_json = self.connection.listNodesJSON(element)
self._flat_dict = {**self._flat_dict, **json.loads(nodes_json)}
self._flat_dict = {key.lower(): value for key, value in self._flat_dict.items()}
self._transaction = Transaction(self)
# The first layer must be generated during initialization to calculate the
# prefixes to keep
self._first_layer: t.List[str] = []
self._prefixes_keep: t.List[str] = []
self._node_infos: t.Dict[Node, NodeInfo] = {}
self._generate_first_layer()
def __getattr__(self, name):
if not name.startswith("_"):
return Node(self, (name.lower(),))
return None
def __getitem__(self, name):
name = name.lower()
if "/" in name:
name_list = name.split("/")
if name_list[0]:
return Node(self, (*name_list,))
return Node(self, (*name_list[1:],))
return Node(self, (name,))
def __contains__(self, k):
return k.lower() in self._first_layer
def __dir__(self):
return self._first_layer
def __iter__(self) -> t.Iterator[t.Tuple[Node, _NodeInfo]]:
for node_raw, info in self._flat_dict.items():
yield self.raw_path_to_node(node_raw), info
def _generate_first_layer(self) -> None:
"""Generates the internal ``_first_layer`` list.
The list represents the available first layer of nested nodes.
Also creates the ``self._prefixes_keep`` variable, which is the inverse
of the ``self._prefix_hide`` attribute.
Raises:
SyntaxError: If any node does not start with a leading slash.
"""
for raw_node in self._flat_dict:
if not raw_node.startswith("/"):
raise SyntaxError(f"{raw_node}: Leading slash not found")
node_split = raw_node.split("/")
# Since we always have a leading slash we ignore the first element
# which is empty.
if node_split[1] == self._prefix_hide:
if node_split[2] not in self._first_layer:
self._first_layer.append(node_split[2])
else:
if node_split[1] not in self._prefixes_keep:
self._prefixes_keep.append(node_split[1])
self._first_layer.extend(self._prefixes_keep)
def get_node_info(self, node: Node):
"""Get the node information for a node.
The nodetree caches the node information for each node.
This enables lazy created nodes to access its information
fast without additional cost.
Please note that this function returns a valid object for all
node objects, even if the node does not exist on the device.
The cache (dict) lifetime is bound to the nodetree object and
each session/nodetree instance must have its own cache.
Args:
node: Node object
Returns:
Node information
.. versionadded:: 0.6.0
"""
try:
return self._node_infos[node]
except KeyError:
self._node_infos[node] = NodeInfo(node)
return self._node_infos[node]
def get_node_info_raw(
self, node: t.Union[Node, str]
) -> t.Dict[Node, t.Optional[t.Dict]]:
"""Get the information/data for a node.
Unix shell-style wildcards are supported.
Args:
node: Node object or string representation.
Returns:
Node(s) information.
Raises:
KeyError: If the node does not match an existing node.
ValueError: If the node is passed as a string in form of a relative
path and no prefix can be added.
"""
key = self.to_raw_path(node)
# resolve potential wildcards
keys = fnmatch.filter(self._flat_dict.keys(), key)
result = {}
for single_key in keys:
result[self.raw_path_to_node(single_key)] = self._flat_dict.get(single_key)
if not result:
raise KeyError(key)
return result
def update_node(
self,
node: t.Union[Node, str],
updates: t.Dict[str, t.Any],
*,
add: bool = False,
) -> None:
"""Update a node in the NodeTree.
Nodes containing wildcards will be resolved but it is not possible to
add new nodes with a ``node`` argument containing wildcards.
Args:
node: Node object or string representation.
updates: Data that will be updated (overwrites the existing values).
add: Flag if a non-existing node should be added (default = False).
Raises:
KeyError: If the node does not exist and the ``add`` flag is not set.
ValueError: If the node is passed as a string in form of a relative
path and no prefix can be added.
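Example:
A minimal sketch (node path and field are assumptions):
>>> nodetree.update_node(
...     "demods/0/rate",
...     {"Unit": "Sa/s"},
... )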
"""
potential_key = self.to_raw_path(node).lower()
# resolve potential wildcards
keys = fnmatch.filter(self._flat_dict.keys(), potential_key)
if not keys:
if not add:
raise KeyError(potential_key)
if any(wildcard in potential_key for wildcard in ["*", "?", "["]):
# Can be implemented in the future if necessary
raise RuntimeError(
f"{potential_key}: Unable to resolve wildcards when adding "
"new nodes."
)
self._flat_dict[potential_key] = updates
first_node = potential_key.split("/")[1]
if self._prefix_hide != first_node:
self._prefixes_keep.append(first_node)
self._first_layer.append(first_node)
else:
for single_key in keys:
self._flat_dict[single_key].update(updates)
self._node_infos = {}
def update_nodes(
self,
update_dict: t.Dict[t.Union[Node, str], t.Dict[str, t.Any]],
*,
add: bool = False,
raise_for_invalid_node: bool = True,
) -> None:
"""Update multiple nodes in the NodeTree.
Similar to :func:`update_node` but for multiple elements that are
represented as a dict.
Args:
update_dict: Dictionary with node as keys and entries that will be
updated as values.
add: Flag if a non-existing node should be added (default = False).
raise_for_invalid_node: If set to `True`, when `add` is False and the
node(s) are invalid/nonexistent, an error is raised.
Otherwise the invalid nodes are silently skipped.
.. versionadded:: 0.3.4
Raises:
KeyError: If the node does not exist and the ``add`` flag is not set.
"""
for node, updates in update_dict.items():
try:
self.update_node(node, updates, add=add)
except KeyError:
if raise_for_invalid_node:
raise
def raw_path_to_node(self, raw_path: str) -> Node:
"""Converts a raw node path string into a Node object.
The function does not check if the node exists, but if the node exists
the returned node also corresponds to that node.
Args:
raw_path: Raw node path (e.g. /dev1234/relative/path/to/node).
Returns:
The corresponding node object linked to this nodetree.
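Example:
A sketch assuming the prefix "dev1234" is hidden:
>>> nodetree.raw_path_to_node("/dev1234/demods/0/freq")
/demods/0/freq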
"""
node_split = raw_path.split("/")
# builtin keywords are escaped with a trailing underscore
# (https://pep8.org/#descriptive-naming-styles)
node_split = [
element + "_" if is_keyword(element) else element for element in node_split
]
# Since we always have a leading slash we ignore the first element
# which is empty.
if node_split[1] == self._prefix_hide:
return Node(self, (*node_split[2:],))
return Node(self, (*node_split[1:],))
def to_raw_path(self, node: t.Union[Node, str]) -> str:
"""Converts a node into a raw node path string.
The function does not check if the node exists, but if the node exists
the returned raw node path exists in the underlying dictionary.
Args:
node: Node object or string representing the node.
Returns:
Raw node path that can be used as a key in the internal dictionary.
Raises:
ValueError: If the node is passed as a string in form of a relative
path and no prefix can be added.
"""
return (
self.node_to_raw_path(node)
if isinstance(node, Node)
else self.string_to_raw_path(node)
)
def node_to_raw_path(self, node: Node) -> str:
"""Converts a node into a raw node path string.
The function does not check if the node exists, but if the node exists
the returned raw node path exists in the underlying dictionary.
Args:
node: Node object.
Returns:
Raw node path that can be used as a key in the internal dictionary.
"""
if not node.raw_tree:
return "/" + self._prefix_hide if self._prefix_hide else "/"
# builtin keywords are escaped with a trailing underscore
# (https://pep8.org/#descriptive-naming-styles)
node_list = [element.rstrip("_") for element in node.raw_tree]
if node_list[0] in self._prefixes_keep:
string_list = "/".join(node_list)
else:
try:
string_list = "/".join(
[self._prefix_hide] + node_list # type: ignore[arg-type]
)
except TypeError:
string_list = "/".join(node_list)
return "/" + string_list
def string_to_raw_path(self, node: str) -> str:
"""Converts a string representation of a node into a raw node path string.
The function does not check if the node exists, but if the node exists
the returned raw node path exists in the underlying dictionary.
If the string does not represent an absolute path (leading slash) the
:obj:`prefix_hide` will be added to the node string.
Args:
node: A string representation of the node.
Returns:
Raw node path that can be used as a key in the internal dictionary.
Raises:
ValueError: If the node is a relative path and no prefix can be
added.
"""
if not node.startswith("/"):
try:
return "/" + self._prefix_hide + "/" + node.lower() # type: ignore
except TypeError as error:
raise ValueError(
f"{node} is a relative path but should be a "
"absolute path (leading slash)"
) from error
return node.lower()
@contextmanager
def set_transaction(self) -> t.Generator[None, None, None]:
"""Context manager for a transactional set.
Can be used as a context in a with statement and bundles all node set
commands into a single transaction. This reduces the network overhead
and often increases the speed.
Within the with block all set commands to a node will be buffered
and grouped into a single command at the end of the context
automatically. (All other operations, e.g. getting the value of a node,
will not be affected)
Warning:
The set is always performed as deep set if called on device nodes.
Examples:
>>> with nodetree.set_transaction():
nodetree.test[0].a(1)
nodetree.test[1].a(2)
"""
self._transaction.start()
try:
yield
self.connection.set(self._transaction.result()) # type: ignore[arg-type]
finally:
self._transaction.stop()
@property
def transaction(self) -> Transaction:
"""Transaction manager."""
return self._transaction
@property
def connection(self) -> Connection:
"""Underlying connection."""
return self._connection
@property
def prefix_hide(self) -> t.Optional[str]:
"""Prefix (e.g device id), that is hidden in the nodetree.
Hidden means that users do not need to specify it and it will be added
automatically to the nodes if necessary.
"""
return self._prefix_hide
@property
def raw_dict(self) -> dict:
"""Underlying flat dictionary with all node information."""
return self._flat_dict | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/nodetree/nodetree.py | nodetree.py |
import logging
UHFQA_SAMPLE_RATE = 1.8e9
SHFQA_SAMPLE_RATE = 2e9
logger = logging.getLogger(__name__)
class Parse:
"""Input and output parsers for node parameters to validate and parse values."""
@staticmethod
def from_bool(value: bool) -> int:
"""Convert a boolean value to a integer value.
Args:
value: A boolean value.
Returns:
Integer value.
"""
return int(value)
@staticmethod
def to_bool(value: int) -> bool:
"""Convert a integer value to a boolean value.
Args:
value: A integer value.
Returns:
Boolean value.
"""
return bool(value)
@staticmethod
def phase(raw_phase: float) -> float:
"""Corrects the phase to -180 <= value <= 180.
Args:
raw_phase: Raw input phase.
Returns:
Corrected phase.
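Example:
>>> Parse.phase(270.0)  # wrapped into the [-180, 180) range
-90.0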
"""
return (raw_phase + 180) % 360 - 180
@staticmethod
def greater_equal(value: float, limit: float) -> float:
"""Ensures that the value is greater or equal a lower limit.
Args:
value: Used value.
limit: Minimum value returned.
Returns:
Clamped value.
"""
if value < limit:
logger.warning(
f"The value {value:.3e} must be greater than or equal to "
f"{limit:.3e} and will be rounded up to: "
f"{limit:.3e}"
)
return limit
else:
return value
@staticmethod
def smaller_equal(value: float, limit: float) -> float:
"""Ensures that the value is smaller or equal a upper limit.
Args:
value: Used value.
limit: Maximum value returned.
Returns:
Clamped value.
"""
if value > limit:
logger.warning(
f"The value {value:.3e} must be smaller than or equal to "
f"{limit:.3e} and will be rounded down to: "
f"{limit:.3e}",
)
return limit
else:
return value
@staticmethod
def multiple_of(value: float, factor: float, rounding: str) -> float:
"""Rounds a value to a multiple of a given factor.
Args:
value: Input value.
factor: Factor that the value needs to be multiple of.
rounding: Method of rounding (nearest, down).
Returns:
Rounded value.
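Example:
>>> Parse.multiple_of(100, 16, "down")  # logs a warning and rounds down
96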
.. versionchanged:: 0.5.3
Invalid `rounding` value raises `ValueError` instead of `RuntimeError`.
"""
if abs(round(value / factor) * factor - value) < 1e-12:
return value
elif rounding == "nearest":
v_rounded = round(value / factor) * factor
logger.warning(
f"The value {value:.3e} is not a multiple of "
f"{factor:.3e} and will be rounded to nearest "
f"multiple: {v_rounded:.3e}",
)
return v_rounded
elif rounding == "down":
v_rounded = round(value // factor) * factor
logger.warning(
f"The value {value:.3e} is not a multiple of "
f"{factor:.3e} and will be rounded down to greatest "
f"multiple: {v_rounded:.3e}",
)
return v_rounded
raise ValueError(
f"Invalid rounding type {rounding} only the "
"following values are allowed: [nearest,down]"
)
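# A sketch of how the entries below are applied (not executed here): the
# set parsers of a node run in order, e.g. rounding a scope length down
# to a multiple of 16:
#   >>> node_parser["SHFQA"]["scopes/0/length"]["SetParser"][2](100)
#   96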
node_parser = {
"SHFQA": {
"scopes/0/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"scopes/0/channels/*/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"scopes/0/trigger/delay": {
"SetParser": lambda v: Parse.multiple_of(v, 2e-9, "nearest"),
},
"scopes/0/length": {
"SetParser": [
lambda v: Parse.greater_equal(v, 16),
lambda v: Parse.smaller_equal(v, 2**18),
lambda v: Parse.multiple_of(v, 16, "down"),
],
},
"scopes/0/segments/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"scopes/0/segments/count": {
"SetParser": lambda v: Parse.greater_equal(v, 0),
},
"scopes/0/averaging/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"scopes/0/averaging/count": {
"SetParser": lambda v: Parse.greater_equal(v, 0),
},
"qachannels/*/input/on": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"qachannels/*/output/on": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"qachannels/*/input/range": {
"SetParser": [
lambda v: Parse.greater_equal(v, -50),
lambda v: Parse.smaller_equal(v, 10),
lambda v: Parse.multiple_of(v, 5, "nearest"),
],
},
"qachannels/*/output/range": {
"SetParser": [
lambda v: Parse.greater_equal(v, -50),
lambda v: Parse.smaller_equal(v, 10),
lambda v: Parse.multiple_of(v, 5, "nearest"),
],
},
"qachannels/*/centerfreq": {
"SetParser": [
lambda v: Parse.greater_equal(v, 1e9),
lambda v: Parse.smaller_equal(v, 8e9),
lambda v: Parse.multiple_of(v, 100e6, "nearest"),
],
},
"qachannels/*/generator/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"qachannels/*/generator/delay": {
"SetParser": lambda v: Parse.multiple_of(v, 2e-9, "nearest"),
},
"qachannels/*/generator/single": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"qachannels/*/readout/result/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"qachannels/*/oscs/0/gain": {
"SetParser": [
lambda v: Parse.smaller_equal(v, 1.0),
lambda v: Parse.greater_equal(v, 0.0),
],
},
"qachannels/*/spectroscopy/length": {
"SetParser": [
lambda v: Parse.greater_equal(v, 4),
lambda v: Parse.smaller_equal(v, ((2**23) - 1) * 4),
lambda v: Parse.multiple_of(v, 4, "down"),
],
},
"qachannels/*/spectroscopy/delay": {
"SetParser": lambda v: Parse.multiple_of(v, 2e-9, "nearest")
},
},
"SHFSG": {
"system/clocks/referenceclock/out/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"system/clocks/referenceclock/out/freq": {
"SetParser": lambda v: Parse.greater_equal(v, 0),
},
"sgchannels/*/centerfreq": {
"SetParser": lambda v: Parse.greater_equal(v, 0),
},
"sgchannels/*/output/on": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"sgchannels/*/output/range": {
"SetParser": [
lambda v: Parse.greater_equal(v, -30),
lambda v: Parse.smaller_equal(v, 10),
lambda v: Parse.multiple_of(v, 5, "nearest"),
],
},
"sgchannels/*/awg/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"sgchannels/*/awg/single": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"sgchannels/*/awg/outputs/*/enables/*": {
"GetParser": Parse.to_bool,
},
"sgchannels/*/awg/outputs/*/gains/*": {
"SetParser": [
lambda v: Parse.smaller_equal(v, 1.0),
lambda v: Parse.greater_equal(v, -1.0),
],
},
"sgchannels/*/oscs/*/freq": {
"SetParser": [
lambda v: Parse.smaller_equal(v, 1e9),
lambda v: Parse.greater_equal(v, -1e9),
],
},
"sgchannels/*/sines/*/phaseshift": {
"SetParser": Parse.phase,
},
"sgchannels/*/sines/*/oscselect": {
"SetParser": [
lambda v: Parse.greater_equal(v, 0),
lambda v: Parse.smaller_equal(v, 7),
lambda v: Parse.multiple_of(v, 1, "nearest"),
],
},
"sgchannels/*/sines/*/harmonic": {
"SetParser": [
lambda v: Parse.greater_equal(v, 1),
lambda v: Parse.smaller_equal(v, 1023),
lambda v: Parse.multiple_of(v, 1, "nearest"),
],
},
"sgchannels/*/sines/*/i/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
"sgchannels/*/sines/*/q/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
},
} | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/parsers.py | parsers.py |
import logging
import typing as t
import time
from collections.abc import Sequence
from zhinst.core import ImpedanceModule as ZIImpedanceModule
from zhinst.toolkit.driver.modules.base_module import BaseModule
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.session import Session
logger = logging.getLogger(__name__)
class CalibrationStatus(int, Sequence):
"""Wrapper around a Impedance module status.
LabOne reports a status for the impedance module as integers.
The integer needs to be interpreted in a binary format where each
bit represents a stage within the compensation. If the bit is set it
means that the step is completed.
This class wraps around this by both deriving from an integer and a
Sequence. Therefore one can use it like an int but also access the
individual steps through items (e.g. module.step[0]).
Args:
value: Integer value of the status.
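Example:
A short sketch of the bit interpretation:
>>> status = CalibrationStatus(0b011)  # steps 0 and 1 completed
>>> status[0], status[1], status[2]
(1, 1, 0)
>>> len(status)
2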
.. versionadded:: 0.5.1
"""
def __new__(cls, value: int):
"""New method of the CalibrationStatus.
Args:
value: Integer value of the status.
"""
new_object = super(CalibrationStatus, cls).__new__(cls, value)
new_object._value = value # type: ignore[attr-defined]
new_object._binary = new_object._to_binary() # type: ignore[attr-defined]
return new_object
def __repr__(self):
return ", ".join(
[f"step {i}: {bool(value)}" for i, value in enumerate(self._binary)]
)
def _to_binary(self):
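# Convert the integer into a list of bits, least significant bit first.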
binary = []
num = self._value
i = 0
while num != 0:
bit = int(num % 2)
binary.insert(i, bit)
i = i + 1
num = int(num / 2)
return binary
def __getitem__(self, item):
return self._binary[item] if len(self._binary) > item else 0
def __len__(self):
return len(self._binary)
class ImpedanceModule(BaseModule):
"""Implements a base Impedance Module for Lock-In instruments.
The Impedance Module corresponds to the Cal sub-tab in the LabOne User
Interface Impedance Analyzer tab. It allows the user to perform a
compensation that will be applied to impedance measurements.
For a complete documentation see the LabOne user manual
https://docs.zhinst.com/labone_programming_manual/impedance_module.html
Args:
impedance_module: Instance of the core Impedance Module.
session: Session to the Data Server.
.. versionadded:: 0.5.1
"""
def __init__(self, impedance_module: ZIImpedanceModule, session: "Session"):
super().__init__(impedance_module, session)
self.root.update_nodes(
{
"/expectedstatus": {"GetParser": CalibrationStatus},
"/status": {"GetParser": CalibrationStatus},
},
raise_for_invalid_node=False,
)
def wait_done(
self,
step: t.Optional[int] = None,
*,
timeout: float = 20.0,
sleep_time: float = 0.5,
) -> None:
"""Waits until the specified compensation step is complete.
Args:
step: The compensation step to wait for completion.
timeout: The maximum waiting time in seconds for the compensation
to complete (default: 20).
sleep_time: Time in seconds to wait between
requesting the state. (default: 0.5)
Raises:
TimeoutError: The compensation is not completed before timeout.
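Example:
A minimal sketch (creating the module via the session is an
assumption):
>>> module = session.modules.impedance
>>> module.execute()
>>> module.wait_done(step=0, timeout=60)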
"""
start_time = time.time()
while (
start_time + timeout >= time.time()
and self.calibrate()
and not self.finished(step)
):
logger.info(f"Progress: {(self.progress() * 100):.1f}%")
time.sleep(sleep_time)
if self.progress() < 1:
raise TimeoutError("Impedance module timed out.")
if not self.finished(step):
if step is None:
raise RuntimeError(
"Impedance module did not reach the status "
f"{CalibrationStatus(self.expectedstatus())} that "
"corresponds to a full compensation. "
f"(current status: {CalibrationStatus(self.status())})"
)
raise RuntimeError(
f"Impedance module did not finish the requested step {step}. "
f"(current status: {CalibrationStatus(self.status())})"
)
def finish(self) -> None:
"""Stop the module."""
self._raw_module.finish()
def finished(self, step: t.Optional[int] = None) -> bool:
"""Check if the calibration or a step of it is finished.
Args:
step: Calibration step. If not None this function checks if the
specified step is finished. Otherwise it checks if the
whole calibration is done.
Returns:
Flag if the calibration or a step is finished.
"""
if step is None:
return self.status() == self.expectedstatus()
return bool(self.status() & (1 << step))
import logging
import typing as t
import time
from enum import IntFlag
from zhinst.core import PidAdvisorModule as ZIPidAdvisorModule
from zhinst.toolkit.driver.modules.base_module import BaseModule
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.session import Session
logger = logging.getLogger(__name__)
class PIDMode(IntFlag):
"""PID Advisor mode.
P_Gain: Optimize/Tune P gain.
I_Gain: Optimize/Tune I gain.
D_Gain: Optimize/Tune D gain.
D_Filter_Limit: Optimize/Tune D filter limit.
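Example:
Flags can be combined bitwise:
>>> mode = PIDMode.P_Gain | PIDMode.I_Gain
>>> PIDMode.P_Gain in mode
True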
.. versionadded:: 0.5.1
"""
NONE = 0
P_Gain = 1
I_Gain = 2
D_Gain = 4
D_Filter_Limit = 8
class PIDAdvisorModule(BaseModule):
"""PID Advisor Module.
The PID Advisor Module provides the functionality available in the Advisor,
Tuner and Display sub-tabs of the LabOne User Interface’s PID / PLL tab.
The PID Advisor is a mathematical model of the instrument’s PID and can be
used to calculate PID controller parameters for optimal feedback loop
performance. The controller gains calculated by the module can be easily
transferred to the device via the API and the results of the Advisor’s
modeling are available as Bode and step response plot data.
For a complete documentation see the LabOne user manual
https://docs.zhinst.com/labone_programming_manual/pid_advisor_module.html
Args:
pid_advisor_module: Instance of the core PID advisor module.
session: Session to the Data Server.
.. versionadded:: 0.5.1
"""
def __init__(self, pid_advisor_module: ZIPidAdvisorModule, session: "Session"):
super().__init__(pid_advisor_module, session)
self.root.update_nodes(
{
"/pid/mode": {
"GetParser": PIDMode,
},
"/tuner/mode": {
"GetParser": PIDMode,
},
},
raise_for_invalid_node=False,
)
def wait_done(self, *, timeout: float = 20.0, sleep_time: float = 2) -> None:
"""Waits until the pid advisor is finished.
Args:
timeout (float): The maximum waiting time in seconds for the
measurement (default: 20).
sleep_time (float): Time in seconds to wait between
requesting the PID advisor state. (default: 2)
Raises:
TimeoutError: The measurement is not completed before
timeout.
"""
start_time = time.time()
while start_time + timeout >= time.time() and (
self.calculate()
or (not self._raw_module.finished() and self.progress() != 1)
):
# When the advisor is started it takes some time to reset the
# progress (and finished) node. This causes weird behavior when
# restarted. This if statement just ignores the progress until it
# is smaller than 100%
if self.progress() < 1:
logger.info(f"Progress: {(self.progress() * 100):.1f}%")
else:
logger.info("Progress: 0.0%")
time.sleep(sleep_time)
if self.calculate() or (
not self._raw_module.finished() and self.progress() != 1
):
raise TimeoutError(f"{self._raw_module.__class__.__name__} timed out.")
logger.info(f"Progress: {(self.progress() * 100):.1f}%") | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/modules/pid_advisor_module.py | pid_advisor_module.py |
import logging
import typing as t
from zhinst.core import ScopeModule as ZIScopeModule
from zhinst.toolkit.driver.modules.base_module import BaseModule
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.session import Session
logger = logging.getLogger(__name__)
class ScopeModule(BaseModule):
"""Scope Module.
The Scope Module corresponds to the functionality available in the Scope
tab in the LabOne User Interface and provides API users with an interface
to acquire assembled and scaled scope data from the instrument
programmatically.
For a complete documentation see the LabOne user manual
https://docs.zhinst.com/labone_programming_manual/scope_module.html
Although it is possible to acquire scope data using the lower-level
subscribe/poll method, the Scope Module provides API users with several
advantages. Specifically, the Scope Module:
* Provides a uniform interface to acquire scope data from all instrument
classes (HF2 scope usage differs from MF and UHF devices, especially
with regard to scaling).
* Scales and offsets the scope wave data to get physically meaningful
values. If data is polled from the device node using subscribe/poll the
scaling and offset must be applied manually.
* Assembles large multi-block transferred scope data into single complete
records. When the scope is configured to record large scope lengths and
data is directly polled from the device node /DEV…/SCOPES/n/WAVE the data
is split into multiple blocks for efficient transfer of data from the
Data Server to the API; these must then be programmatically reassembled.
The Scope Module performs this assembly and returns complete scope
records (unless used in pass-through mode, mode=0).
* Can be configured to return the FFT of the acquired scope records
(with mode=3) as provided by the Scope Tab in the LabOne UI. FFT data is
not available from the device nodes in the /DEV/…./SCOPES/ branch using
subscribe/poll.
* Can be configured to average the acquired scope records using the
averager parameters.
* Can be configured to return a specific number of scope records using the
historylength parameter.
Args:
scope_module: Instance of the core scope module.
session: Session to the Data Server.
.. versionadded:: 0.5.0
"""
def __init__(self, scope_module: ZIScopeModule, session: "Session"):
super().__init__(scope_module, session)
def finish(self) -> None:
"""Stop the module."""
self._raw_module.finish() | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/modules/scope_module.py | scope_module.py |
import logging
import typing as t
from pathlib import Path
from zhinst.core import DeviceSettingsModule as ZIDeviceSettingsModule
from zhinst.toolkit.driver.modules.base_module import BaseModule
from zhinst.toolkit.nodetree.helper import NodeDict
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.driver.devices import DeviceType
from zhinst.toolkit.session import Session
logger = logging.getLogger(__name__)
class DeviceSettingsModule(BaseModule):
"""Implements the device settings module for storing and loading settings.
The Device Settings Module provides functionality for saving and loading
device settings to and from file. The file is saved in XML format.
For simple save and load two helper functions exist `save_to_file` and
`load_from_file`.
Note: It is not recommended to use this function to read the
device settings. Instead one can use the zhinst-toolkit functionality
to read all settings from a device/subtree from the device directly by
calling it.
For a complete documentation see the LabOne user manual
https://docs.zhinst.com/labone_programming_manual/device_settings_module.html
Args:
device_settings_module: Instance of the core Device Settings Module.
session: Session to the Data Server.
"""
def __init__(
self, device_settings_module: ZIDeviceSettingsModule, session: "Session"
):
super().__init__(device_settings_module, session)
self.root.update_nodes(
{
"/path": {
"SetParser": self._set_path,
}
},
raise_for_invalid_node=False,
)
def _simple_execution(
self,
command: str,
filename: t.Union[str, Path],
device: t.Union[str, "DeviceType"],
timeout: float = 30,
) -> None:
"""Execute a command on a clean module.
This function creates a new module instance to avoid misconfiguration.
It is also synchronous, meaning it will block until the command has
finished.
Args:
command: The command to execute. (`save`, `load`, `read`)
filename: The path to the settings file.
device: The device to load the settings to.
timeout: Max time to wait for the loading to finish.
Raises:
TimeoutError: If the loading of the settings timed out.
"""
filename = Path(filename)
temp_module = self._session.modules.create_device_settings_module()
temp_module.device(device)
temp_module.filename(filename.stem)
temp_module.path(filename.parent)
temp_module.command(command)
temp_module.execute()
try:
# Use the finished node instead of the function to take advantage of
# the wait-for-state-change functionality.
temp_module.finished.wait_for_state_change(1, timeout=timeout)
except TimeoutError as e:
raise TimeoutError(
f"Unable to load device settings after {timeout} seconds."
) from e
def load_from_file(
self,
filename: t.Union[str, Path],
device: t.Union["DeviceType", str],
timeout: float = 30,
) -> None:
"""Load a LabOne settings file to a device.
This function creates a new module instance to avoid misconfiguration.
It is also synchronous, meaning it will block until loading the
settings has finished.
Args:
filename: The path to the settings file.
device: The device to load the settings to.
timeout: Max time to wait for the loading to finish.
Raises:
TimeoutError: If the loading of the settings timed out.
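Example:
A minimal sketch (file name and serial are assumptions):
>>> module = session.modules.device_settings
>>> module.load_from_file("my_settings.xml", "dev1234")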
"""
self._simple_execution("load", filename, device, timeout)
def save_to_file(
self,
filename: t.Union[str, Path],
device: t.Union["DeviceType", str],
timeout: int = 30,
) -> None:
"""Save the device settings to a LabOne settings file.
This function creates a new module instance to avoid misconfiguration.
It is also synchronous, meaning it will block until save operation has
finished.
Args:
filename: The path to the settings file.
device: The device which settings should be saved.
timeout: Max time to wait for the saving to finish.
Raises:
TimeoutError: If saving the settings timed out.
"""
self._simple_execution("save", filename, device, timeout)
def read(self) -> NodeDict:
"""Read device settings.
Note: It is not recommended to use this function to read the
device settings. Instead one can use the zhinst-toolkit functionality
to read all settings from a device/subtree from the device directly by
calling it.
>>> device = session.connect_device()
>>> ...
>>> device()
<all device settings>
>>> device.demods()
<all demodulator settings>
Returns:
Device settings.
"""
return super().read() | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/modules/device_settings_module.py | device_settings_module.py |
import logging
import typing as t
from collections import namedtuple
import numpy as np
from zhinst.core import DataAcquisitionModule as ZIDAQModule
from zhinst.toolkit.driver.modules.base_module import BaseModule
from zhinst.toolkit.nodetree.helper import NodeDict
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.session import Session
logger = logging.getLogger(__name__)
DAQResult = namedtuple("DAQResult", ["header", "value", "time", "frequency", "shape"])
class DAQModule(BaseModule):
"""Data Acquisition Module.
The Data Acquisition Module corresponds to the Data Acquisition tab of the
LabOne User Interface. It enables the user to record and align time and
frequency domain data from multiple instrument signal sources at a defined
data rate. The data may be recorded either continuously or in bursts based
upon trigger criteria analogous to the functionality provided by laboratory
oscilloscopes.
For a complete documentation see the LabOne user manual
https://docs.zhinst.com/labone_programming_manual/data_acquisition_module.html
Args:
daq_module: Instance of the core DAQ module.
session: Session to the Data Server.
"""
def __init__(self, daq_module: ZIDAQModule, session: "Session"):
super().__init__(daq_module, session)
self.root.update_nodes(
{
"/triggernode": {
"GetParser": self._get_node,
"SetParser": self._set_node,
}
},
raise_for_invalid_node=False,
)
@staticmethod
def _process_burst(
node: str, burst: t.Dict[str, t.Any], clk_rate: float
) -> DAQResult:
"""Process a single burst into a formatted DAQResult object.
Args:
node: Name of the node of the burst.
burst: raw burst data.
clk_rate: Clock rate [Hz] for converting the timestamps. Only
applies if the raw flag is reset.
Returns:
Processed and formatted burst data.
"""
if "fft" in node:
bin_count = len(burst["value"][0])
bin_resolution = burst["header"]["gridcoldelta"]
frequency = np.arange(bin_count)
bandwidth = bin_resolution * len(frequency)
frequency = frequency * bin_resolution
if "xiy" in node:
frequency = frequency - bandwidth / 2.0 + bin_resolution / 2.0
return DAQResult(
burst.get("header", {}),
burst["value"],
None,
frequency,
burst["value"].shape,
)
timestamp = burst["timestamp"]
return DAQResult(
burst.get("header", {}),
burst["value"],
(timestamp[0] - timestamp[0][0]) / clk_rate,
None,
burst["value"].shape,
)
@staticmethod
def _process_node_data(
node: str, data: t.List[t.Dict[str, t.Any]], clk_rate: float
) -> t.List[t.Union[t.Dict[str, t.Any], DAQResult]]:
"""Process the data of a node.
Only subscribed sample nodes are processed. Other nodes (module native nodes)
are returned in the original format.
Args:
node: Name of the node of the burst.
data: raw data for the node.
clk_rate: Clock rate [Hz] for converting the timestamps. Only
applies if the raw flag is reset.
Returns:
Processed and formatted node data.
"""
if isinstance(data[0], dict):
return [DAQModule._process_burst(node, burst, clk_rate) for burst in data]
return data
def finish(self) -> None:
"""Stop the module.
.. versionadded:: 0.5.0
"""
self._raw_module.finish()
def finished(self) -> bool:
"""Check if the acquisition has finished.
Returns:
Flag if the acquisition has finished.
.. versionadded:: 0.5.0
"""
return self._raw_module.finished()
def trigger(self) -> None:
"""Execute a manual trigger.
.. versionadded:: 0.5.0
"""
self._raw_module.trigger()
def read(self, *, raw: bool = False, clk_rate: float = 60e6) -> NodeDict:
"""Read the acquired data from the module.
The data is split into bursts.
Args:
raw: Flag if the acquired data from the subscribed device
device nodes should be converted into the DAQResult format
(raw = False) or not. (default = False)
clk_rate: Clock rate [Hz] for converting the timestamps. Only
applies if the raw flag is reset.
Returns:
Result of the burst grouped by the signals.
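Example:
A minimal sketch of a typical acquisition (the module creation and
the signal node are assumptions):
>>> daq_module = session.modules.daq
>>> daq_module.subscribe(device.demods[0].sample.x)
>>> daq_module.execute()
>>> # ... wait until daq_module.finished() ...
>>> results = daq_module.read(clk_rate=60e6)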
"""
raw_result = self._raw_module.read(flat=True)
if raw:
return NodeDict(raw_result)
return NodeDict(
{
node: self._process_node_data(node, data, clk_rate)
for node, data in raw_result.items()
}
) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/modules/daq_module.py | daq_module.py |
import logging
import typing as t
from functools import partial
from zhinst.core import PrecompensationAdvisorModule as TKPrecompensationAdvisorModule
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.driver.modules.base_module import BaseModule
logger = logging.getLogger(__name__)
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.session import Session
class PrecompensationAdvisorModule(Node):
"""Precompensation Advisor Module.
This module provides the functionality available in the LabOne User
Interface’s Precompensation Tab. In essence the precompensation allows a
pre-distortion or pre-emphasis to be applied to a signal before it leaves
the instrument, to compensate for undesired distortions caused by the
device under test (DUT). The Precompensation Advisor module simulates the
precompensation filters in the device, allowing the user to experiment with
different filter settings and filter combinations to obtain an optimal
output signal, before using the setup in the actual device.
For a complete documentation see the LabOne user manual
https://docs.zhinst.com/labone_programming_manual/precompensation_advisor_module.html
Note:
Unlike most other LabOne modules, this module does not expose any
functions. Each time one or more filter parameters are changed, the
module re-runs the simulation and the results can be read via the
wave/output, wave/output/forwardwave and wave/output/backwardwave
parameters.
Args:
raw_module: zhinst.core module.
session: Session to the Data Server.
"""
def __init__(self, raw_module: TKPrecompensationAdvisorModule, session: "Session"):
self._raw_module = raw_module
self._session = session
super().__init__(NodeTree(raw_module), tuple())
self.root.update_nodes(
{
"/device": {
"GetParser": partial(BaseModule._get_device, self._session),
"SetParser": BaseModule._set_device,
}
},
raise_for_invalid_node=False,
)
def __repr__(self):
return str(f"{self._raw_module.__class__.__name__}({repr(self._session)})")
@property
def raw_module(self) -> TKPrecompensationAdvisorModule:
"""Underlying core module."""
return self._raw_module | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/modules/precompensation_advisor_module.py | precompensation_advisor_module.py |
import logging
import time
import typing as t
from os import PathLike, fspath
from functools import partial
from zhinst.core import ModuleBase
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.helper import NodeDict
logger = logging.getLogger(__name__)
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.driver.devices import DeviceType
from zhinst.toolkit.session import Session
ZIModule = t.TypeVar("ZIModule", bound=ModuleBase)
class BaseModule(Node):
"""Generic toolkit driver for a LabOne Modules.
All module specific class are derived from this class.
It exposes the nodetree and also implements common functions valid for all
modules.
It also can be used directly, e.g. for modules that have no special class
in toolkit.
Args:
raw_module: zhinst.core module.
session: Session to the Data Server.
"""
def __init__(self, raw_module: ZIModule, session: "Session"):
self._raw_module = raw_module
self._session = session
super().__init__(NodeTree(raw_module), tuple())
self.root.update_nodes(
{
"/device": {
"GetParser": partial(self._get_device, self._session),
"SetParser": self._set_device,
},
"/directory": {
"SetParser": self._set_path,
},
"/save/directory": {
"SetParser": self._set_path,
},
},
raise_for_invalid_node=False,
)
def __repr__(self):
return str(f"{self._raw_module.__class__.__name__}({repr(self._session)})")
@staticmethod
def _get_device(session, serial: str) -> t.Union["DeviceType", str]:
"""Convert a device serial into a toolkit device object.
Args:
session: Session used to look up the device.
serial: Serial of the device
Returns:
Toolkit device object. If the serial does not
match to a connected device the serial is returned instead.
"""
try:
return session.devices[serial]
except (RuntimeError, KeyError):
return serial
@staticmethod
def _set_device(value: t.Union["DeviceType", str]) -> str:
"""Convert a toolkit device object into a serial string.
Args:
value: A toolkit device object
(can also be a serial string directly)
Returns:
str: device serial
"""
try:
return value.serial # type: ignore
except AttributeError:
return value
def _get_node(self, node: str) -> t.Union[Node, str]:
"""Convert a raw node string into a toolkit node.
Args:
node (str): raw node string
Returns:
Toolkit node. (if the node can not be converted the raw node
string is returned)
"""
try:
return self._session.raw_path_to_node(node.replace(".", "/"), module=self)
except (KeyError, RuntimeError):
logger.error(
f"Could not resolve {node} into a node of the "
f"{self._raw_module.__class__.__name__} or "
"a connected device."
)
return node
@staticmethod
def _set_node(signal: t.Union[Node, str]) -> str:
"""Convert a toolkit node into a raw node string.
Args:
signal: Toolkit node
Returns:
str: raw string node
"""
try:
node = signal.node_info.path # type: ignore
except AttributeError:
node = signal
return node
@staticmethod
def _set_path(path: t.Union[str, PathLike]) -> str:
"""Convert a Pathlike object into a string for LabOne.
LabOne only accepts strings for the path node. This function converts
any Pathlike object into a string so that is processed by LabOne
correctly.
If the object implements the `absolute` method (e.g. pathlib.Path)
it is used to use the absolute path. This is important since LabOne
operates in a different directory.
Args:
path: Path to send to the device.
Returns:
String representation of the path like object.
"""
if hasattr(path, "absolute"):
path = path.absolute() # type: ignore[union-attr]
return fspath(path)
def wait_done(self, *, timeout: float = 20.0, sleep_time: float = 0.5) -> None:
"""Waits until the module is finished.
Warning: Only usable for modules that make use of the `/finished` node.
Args:
timeout (float): The maximum waiting time in seconds for the
module to finish (default: 20).
sleep_time (float): Time in seconds to wait between
requesting the module state. (default: 0.5)
Raises:
TimeoutError: The measurement is not completed before
timeout.
"""
start_time = time.time()
while (
start_time + timeout >= time.time()
and not self._raw_module.finished()
and self.progress() != 1
):
logger.info(f"Progress: {(self.progress() * 100):.1f}%")
time.sleep(sleep_time)
if not self._raw_module.finished() and self.progress() != 1:
raise TimeoutError(f"{self._raw_module.__class__.__name__} timed out.")
logger.info(f"Progress: {(self.progress() * 100):.1f}%")
def progress(self) -> float:
"""Progress of the execution.
Returns:
Progress of the execution with a number between 0 and 1
"""
return self._raw_module.progress()[0]
def subscribe(self, signal: t.Union[Node, str]) -> None:
"""Subscribe to a node.
The node can either be a node of this module or of a connected device.
Args:
signal: Node that should be subscribed to.
.. versionchanged:: 0.5.0 Add support for raw string signals
"""
try:
self._raw_module.subscribe(signal.node_info.path) # type: ignore
except AttributeError:
self._raw_module.subscribe(signal)
def unsubscribe(self, signal: t.Union[Node, str]) -> None:
"""Unsubscribe from a node.
The node can either be a node of this module or of a connected device.
Args:
signal: Node that should be unsubscribed from.
.. versionchanged:: 0.5.0 Add support for raw string signals
"""
try:
self._raw_module.unsubscribe(signal.node_info.path) # type: ignore
except AttributeError:
self._raw_module.unsubscribe(signal)
def execute(self) -> None:
"""Start the module execution.
Subscription or unsubscription is not possible until the execution is
finished.
.. versionadded:: 0.4.1
"""
self._raw_module.execute()
def read(self) -> NodeDict:
"""Read scope data.
If the recording is still ongoing only a subset of data is returned.
Returns:
Scope data.
"""
return NodeDict(self._raw_module.read(flat=True))
@property
def raw_module(self) -> ZIModule: # type: ignore [type-var]
"""Underlying core module."""
return self._raw_module | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/modules/base_module.py | base_module.py |
import json
import logging
import re
import typing as t
from collections import OrderedDict
from dataclasses import asdict
from enum import IntEnum
from pathlib import Path
import numpy as np
from zhinst.core import ziDAQServer
from zhinst.utils.shf_sweeper import AvgConfig, EnvelopeConfig, RfConfig
from zhinst.utils.shf_sweeper import ShfSweeper as CoreSweeper
from zhinst.utils.shf_sweeper import SweepConfig, TriggerConfig
from zhinst.toolkit.driver.parsers import Parse
from zhinst.toolkit.exceptions import ToolkitError
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.connection_dict import ConnectionDict
from zhinst.toolkit.nodetree.helper import NodeDoc
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.driver.devices import DeviceType
from zhinst.toolkit.session import Session
logger = logging.getLogger(__name__)
class SHFQASweeper(Node):
"""Toolkit adaption for the zhinst.utils.SHFSweeper.
For now the general sweeper module does not support the SHFQA. However,
a Python-based implementation called ``SHFSweeper`` already provides
this functionality. The ``SHFSweeper`` is part of the ``zhinst`` module
and can be found in the utils.
Toolkit wraps around the ``SHFSweeper`` and exposes an interface that is
similar to the LabOne modules, meaning the parameters are exposed in a
node-tree-like structure.
All parameters can be accessed through their corresponding node:
* device: Device to run the sweeper with
* sweep: Frequency range settings for a sweep
* rf: RF in- and output settings for a sweep
* average: Averaging settings for a sweep
* trigger: Settings for the trigger
* envelope: Settings for defining a complex envelope for pulsed spectroscopy
The underlying module is updated with the parameter changes automatically.
Every function from the underlying SHFSweeper module is exposed and can be
used in the same way.
Args:
session: Session to the Data Server.
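Example:
A minimal sketch of a sweep, assuming a running Data Server session;
the serial and frequency values are placeholders and the parameter
nodes mirror the fields of the underlying ``SweepConfig`` class:
>>> sweeper = SHFQASweeper(session)
>>> sweeper.device("dev12000")
>>> sweeper.sweep.start_freq(-200e6)
>>> sweeper.sweep.stop_freq(200e6)
>>> result = sweeper.run()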
"""
def __init__(self, session: "Session"):
self._config_classes = {
SweepConfig: ("sweep", "sweep_config"),
RfConfig: ("rf", "rf_config"),
AvgConfig: ("average", "avg_config"),
TriggerConfig: ("trigger", "trig_config"),
EnvelopeConfig: ("envelope", "envelope_config"),
}
self._renamed_nodes = {
"imp50": "input_impedance",
"use_sequencer": "mode",
"force_sw_trigger": "sw_trigger_mode",
}
super().__init__(self._create_nodetree(), tuple())
self._daq_server = ziDAQServer(
session.daq_server.host,
session.daq_server.port,
6,
)
self._raw_module = CoreSweeper(self._daq_server, "")
self._session = session
self.root.update_nodes(
{
"/device": {
"GetParser": self._get_device,
"SetParser": self._set_device,
},
"/envelope/enable": {
"GetParser": Parse.to_bool,
"SetParser": Parse.from_bool,
},
},
raise_for_invalid_node=False,
)
def __repr__(self):
return str(f"SHFQASweeper({repr(self._session)})")
def _get_device(self, serial: str) -> t.Union["DeviceType", str]:
"""Convert a device serial into a toolkit device object.
Args:
serial: Serial of the device
Returns:
Toolkit device object. If the serial does not
match to a connected device the serial is returned instead.
"""
try:
return self._session.devices[serial]
except (RuntimeError, KeyError):
return serial
def _set_device(self, value: t.Union["DeviceType", str]) -> str:
"""Convert a toolkit device object into a serial string.
Args:
value: Toolkit device object
(can also be a serial string directly)
Returns:
Device serial
"""
serial = ""
try:
serial = value.serial # type: ignore
except AttributeError:
serial = value
self._raw_module = CoreSweeper(self._daq_server, serial)
return serial
def _create_nodetree(self) -> NodeTree:
"""Create node tree for the SHFQA sweeper.
Uses the hardcoded "resources/shfqa_sweeper_nodes.json" information
and the SHFSweeper data classes to automatically create a valid node
tree. (Automatically adds new parameters with dummy information)
Returns:
node tree for the shfqa sweeper
"""
json_path = Path(__file__).parent / "../../resources/shfqa_sweeper_nodes.json"
with json_path.open("r") as file:
raw_info: NodeDoc = json.loads(file.read())
values: t.Dict[str, t.Any] = {}
info: NodeDoc = {}
for config_class, parent_name in self._config_classes.items():
for parameter, default_value in asdict(config_class()).items():
node = (
f"/{parent_name[0]}/{self._renamed_nodes.get(parameter, parameter)}"
)
try:
info[node] = raw_info[node]
except KeyError:
# node not in json
logger.warning(f"{node} is missing in {json_path}.")
type_mapping = {
int: "Integer (64 bit)",
float: "Double",
str: "String",
bool: "Integer (64 bit)",
np.ndarray: "ZIVectorData",
}
info[node] = {
"Node": node,
"Description": node,
"Properties": "Read, Write",
"Unit": "None",
"Type": type_mapping[type(default_value)],
}
if "Options" in info[node]:
option_map = {}
for key, value in info[node]["Options"].items():
options = re.findall(r'"(.+?)"[,:]+', value)
option_map.update({x: int(key) for x in options})
values[node] = option_map.get(default_value, default_value)
else:
values[node] = default_value
info["/device"] = raw_info["/device"]
values["/device"] = ""
info["/envelope/enable"] = raw_info["/envelope/enable"]
values["/envelope/enable"] = 0
info["/actual_settling_time"] = raw_info["/actual_settling_time"]
values["/actual_settling_time"] = lambda: self._raw_module.actual_settling_time
info["/actual_hold_off_time"] = raw_info["/actual_hold_off_time"]
values["/actual_hold_off_time"] = lambda: self._raw_module.actual_hold_off_time
info["/predicted_cycle_time"] = raw_info["/predicted_cycle_time"]
values["/predicted_cycle_time"] = lambda: self._raw_module.predicted_cycle_time
return NodeTree(ConnectionDict(values, info))
def _update_settings(self) -> None:
"""Update the ShfSweeper settings from the node tree.
Converts the nodetree into a valid configuration for the SHFSweeper.
"""
if not self.device():
raise ToolkitError(
"The device serial needs to be set before using the module."
)
data = OrderedDict()
for config_class, parent_name in self._config_classes.items():
config = OrderedDict()
for parameter in asdict(config_class()):
value = self[parent_name[0]][
self._renamed_nodes.get(parameter, parameter)
]()
if isinstance(value, IntEnum):
value = value.name
config[parameter] = value
data[parent_name[1]] = config_class(**config)
# special treatment for the envelope config
if not self.envelope.enable():
data.pop("envelope_config")
# special treatment for the imp50
try:
data["trig_config"].imp50 = data["trig_config"].imp50 == "imp50"
except AttributeError:
logger.warning(
"imp50 setting is no longer available in the shf_sweeper class."
)
# special treatment for mode
try:
data["sweep_config"].use_sequencer = (
data["sweep_config"].use_sequencer == "sequencer-based"
)
except AttributeError:
logger.warning(
"use_sequencer setting is no longer available in the shf_sweeper class."
)
# special treatment for trigger source
try:
data["trig_config"].source = (
data["trig_config"].source
if data["trig_config"].source != "auto"
else None
)
except AttributeError:
logger.warning(
"source setting is no longer available in the shf_sweeper class."
)
# special treatment for the force_sw_trigger
try:
data["trig_config"].force_sw_trigger = (
data["trig_config"].force_sw_trigger == "force"
)
except AttributeError:
logger.warning(
"force_sw_trigger setting is no longer available in the shf_sweeper."
)
self._raw_module.configure(**data)
def run(self) -> dict:
"""Perform a sweep with the specified settings.
This method wraps around the `run` method of
`zhinst.utils.shf_sweeper`
Returns:
A dictionary with measurement data of the last sweep.
"""
self._update_settings()
return self._raw_module.run()
def get_result(self) -> dict:
"""Get the measurement data of the last sweep.
This method wraps around the `get_result` method of
`zhinst.utils.shf_sweeper`
Returns:
A dictionary with measurement data of the last sweep.
"""
self._update_settings()
return self._raw_module.get_result()
def plot(self) -> None:
"""Plot power over frequency for last sweep.
This method wraps around the `plot` method of
`zhinst.utils.shf_sweeper`
"""
self._update_settings()
return self._raw_module.plot()
def get_offset_freq_vector(self) -> t.Any:
"""Get vector of frequency points.
This method wraps around the `get_offset_freq_vector` method of
`zhinst.utils.shf_sweeper`
Returns:
Vector of frequency points.
"""
self._update_settings()
return self._raw_module.get_offset_freq_vector() | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/modules/shfqa_sweeper.py | shfqa_sweeper.py |
import logging
import numpy as np
import zhinst.utils.shfqa as utils
from zhinst.toolkit.interface import AveragingMode
from zhinst.toolkit.nodetree import Node, NodeTree
logger = logging.getLogger(__name__)
class Spectroscopy(Node):
"""Spectroscopy node.
Implements basic functionality of the spectroscopy, e.g. allowing the user to
read the result logger data.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
serial: Serial of the device.
index: Index of the corresponding awg channel
"""
def __init__(
self,
root: NodeTree,
tree: tuple,
serial: str,
index: int,
):
super().__init__(root, tree)
self._daq_server = root.connection
self._serial = serial
self._index = index
def configure_result_logger(
self,
*,
result_length: int,
num_averages: int = 1,
averaging_mode: AveragingMode = AveragingMode.CYCLIC,
) -> None:
"""Configures the result logger for spectroscopy mode.
Args:
result_length: Number of results to be returned by the result logger
num_averages: Number of averages, will be rounded to 2^n.
averaging_mode: Averaging order of the result.
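Example:
A minimal sketch, assuming ``spectroscopy`` refers to
``device.qachannels[0].spectroscopy`` of a connected SHFQA:
>>> spectroscopy.configure_result_logger(
...     result_length=1024, num_averages=2
... )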
"""
utils.configure_result_logger_for_spectroscopy(
self._daq_server,
self._serial,
self._index,
result_length=result_length,
num_averages=num_averages,
averaging_mode=int(averaging_mode),
)
def run(self) -> None:
"""Resets and enables the spectroscopy result logger."""
utils.enable_result_logger(
self._daq_server,
self._serial,
self._index,
mode="spectroscopy",
)
def stop(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Stop the result logger.
Args:
timeout: The maximum waiting time in seconds for the
Spectroscopy (default: 10).
sleep_time: Time in seconds to wait between
requesting Spectroscopy state
Raises:
TimeoutError: If the result logger could not be stopped within the
given time.
"""
self.result.enable(False)
try:
self.result.enable.wait_for_state_change(
0, timeout=timeout, sleep_time=sleep_time
)
except TimeoutError as error:
raise TimeoutError(
f"{repr(self)}: The result logger could not be stopped "
f"within the specified timeout ({timeout}s)."
) from error
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Wait until spectroscopy is finished.
Args:
timeout (float): The maximum waiting time in seconds for the
Spectroscopy (default: 10).
sleep_time (float): Time in seconds to wait between
requesting Spectroscopy state
Raises:
TimeoutError: If the spectroscopy recording is not completed within the
given time.
"""
try:
self.result.enable.wait_for_state_change(
0, timeout=timeout, sleep_time=sleep_time
)
except TimeoutError as error:
raise TimeoutError(
f"{repr(self)}: The spectroscopy did not finish "
f"within the specified timeout ({timeout}s)."
) from error
def read(
self,
*,
timeout: float = 10,
) -> np.ndarray:
"""Waits until the logger finished recording and returns the measured data.
Args:
timeout: Maximum time to wait for data in seconds (default = 10s)
Returns:
An array containing the result logger data.
"""
return utils.get_result_logger_data(
self._daq_server,
self._serial,
self._index,
mode="spectroscopy",
timeout=timeout,
) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/nodes/spectroscopy.py | spectroscopy.py |
import json
import logging
import typing as t
from zhinst.core import compile_seqc
from zhinst.toolkit.driver.nodes.command_table_node import CommandTableNode
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.helper import (
lazy_property,
create_or_append_set_transaction,
)
from zhinst.toolkit.waveform import Waveforms
from zhinst.toolkit.sequence import Sequence
logger = logging.getLogger(__name__)
class AWG(Node):
"""AWG node.
This class implements the basic functionality for the device specific
arbitrary waveform generator.
Besides the upload/compilation of sequences it offers the upload of
waveforms and command tables.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
session: Underlying session.
serial: Serial of the device.
index: Index of the corresponding awg channel
device_type: Device type
"""
def __init__(
self,
root: NodeTree,
tree: tuple,
serial: str,
index: int,
device_type: str,
device_options: str,
):
Node.__init__(self, root, tree)
self._daq_server = root.connection
self._serial = serial
self._index = index
self._device_type = device_type
self._device_options = device_options
def enable_sequencer(self, *, single: bool) -> None:
"""Starts the sequencer of a specific channel.
Warning:
This function is synchronous and blocks until the sequencer is enabled.
When working with multiple instruments this function is the wrong
approach and the sequencer should be enabled asynchronously.
(For more information please take a look at the awg example in the
toolkit documentation.)
Args:
single: Flag if the sequencer should be disabled after finishing
execution.
Raises:
RuntimeError: If the sequencer could not be enabled.
.. versionchanged:: 0.5.0
Check the acknowledged value instead of using `wait_for_state_change`.
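Example:
A typical synchronous flow after a sequencer program has been
loaded (``awg`` is assumed to be e.g. ``device.awgs[0]``):
>>> awg.enable_sequencer(single=True)
>>> awg.wait_done(timeout=10)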
"""
self.single(single)
if not self.enable(1, deep=True):
raise RuntimeError(
"The sequencer could not be enabled. Please ensure that the "
"sequencer program is loaded and configured correctly."
)
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the AWG is finished.
Args:
timeout: The maximum waiting time in seconds for the generator
(default: 10).
sleep_time: Time in seconds to wait between requesting generator
state
Raises:
RuntimeError: If continuous mode is enabled
TimeoutError: If the sequencer program did not finish within
the specified timeout time
"""
if not self.single():
raise RuntimeError(
f"{repr(self)}: The generator is running in continuous mode, "
"it will never be finished."
)
try:
self.enable.wait_for_state_change(0, timeout=timeout, sleep_time=sleep_time)
except TimeoutError as error:
raise TimeoutError(
f"{repr(self)}: The execution of the sequencer program did not finish "
f"within the specified timeout ({timeout}s)."
) from error
def compile_sequencer_program(
self,
sequencer_program: t.Union[str, Sequence],
**kwargs: t.Union[str, int],
) -> t.Tuple[bytes, t.Dict[str, t.Any]]:
"""Compiles a sequencer program for the specific device.
Args:
sequencer_program: The sequencer program to compile.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
the device. It is recommended to pass the samplerate if more
than one sequencer code is uploaded in a row to speed up the
execution time.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Returns:
elf: Binary ELF data for sequencer.
extra: Extra dictionary with compiler output.
Examples:
>>> elf, compile_info = device.awgs[0].compile_sequencer_program(seqc)
>>> device.awgs[0].elf.data(elf)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the compilation failed.
.. versionadded:: 0.4.0
"""
if "SHFQC" in self._device_type:
kwargs["sequencer"] = "sg" if "sgchannels" in self._tree else "qa"
elif "HDAWG" in self._device_type and "samplerate" not in kwargs:
kwargs["samplerate"] = self.root.system.clocks.sampleclock.freq()
return compile_seqc(
str(sequencer_program),
self._device_type,
self._device_options,
self._index,
**kwargs,
)
def load_sequencer_program(
self,
sequencer_program: t.Union[str, Sequence],
**kwargs: t.Union[str, int],
) -> t.Dict[str, t.Any]:
"""Compiles the given sequencer program on the AWG Core.
Warning:
After uploading the sequencer program one needs to wait for
the awg core to become ready before it can be enabled.
The awg core indicates the ready state through its `ready` node.
(device.awgs[0].ready() == True)
Args:
sequencer_program: Sequencer program to be uploaded.
Keyword Args:
samplerate: Target sample rate of the sequencer. Only allowed/
necessary for HDAWG devices. Must correspond to the samplerate
used by the device (device.system.clocks.sampleclock.freq()).
If not specified the function will get the value itself from
the device. It is recommended to pass the samplerate if more
than one sequencer code is uploaded in a row to speed up the
execution time.
wavepath: path to directory with waveforms. Defaults to path used
by LabOne UI or AWG Module.
waveforms: waveform CSV files separated by ';'
output: name of embedded ELF filename.
Examples:
>>> compile_info = device.awgs[0].load_sequencer_program(seqc)
>>> device.awgs[0].ready.wait_for_state_change(1)
>>> device.awgs[0].enable(True)
Raises:
RuntimeError: `sequencer_program` is empty.
RuntimeError: If the upload or compilation failed.
.. versionadded:: 0.3.4
`sequencer_program` does not accept empty strings
.. versionadded:: 0.4.0
Use offline compiler instead of AWG module to compile the sequencer
program. This speeds up the compilation and also enables parallel
compilation/upload.
"""
elf, compiler_info = self.compile_sequencer_program(sequencer_program, **kwargs)
self.elf.data(elf)
return compiler_info
def write_to_waveform_memory(
self, waveforms: Waveforms, indexes: t.Optional[list] = None
) -> None:
"""Writes waveforms to the waveform memory.
The waveforms must already be assigned in the sequencer program.
Args:
waveforms: Waveforms that should be uploaded.
indexes: Specify a list of indexes that should be uploaded. If
nothing is specified all available indexes in waveforms will
be uploaded. (default = None)
.. versionchanged:: 0.4.2
Removed `validate` flag and functionality. The validation check is
now done in the `Waveforms.validate` function.
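Example:
A minimal sketch; the waveform data and index 0 are placeholders
and must match a waveform declared in the sequencer program:
>>> import numpy as np
>>> waveforms = Waveforms()
>>> waveforms[0] = np.ones(1024)
>>> awg.write_to_waveform_memory(waveforms)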
"""
with create_or_append_set_transaction(self._root):
for waveform_index in waveforms.keys():
if indexes and waveform_index not in indexes:
continue
self.root.transaction.add(
self.waveform.waves[waveform_index],
waveforms.get_raw_vector(waveform_index),
)
def read_from_waveform_memory(self, indexes: t.Optional[t.List[int]] = None) -> Waveforms:
"""Read waveforms from the waveform memory.
Args:
indexes: List of waveform indexes to read from the device. If not
specified all assigned waveforms will be downloaded.
Returns:
Waveform object with the downloaded waveforms.
"""
waveform_info = json.loads(self.waveform.descriptors()).get("waveforms", [])
nodes = [
self.waveform.node_info.path + f"/waves/{index}"
for index in range(len(waveform_info))
if (indexes is None or index in indexes)
# Entries that have a play_config equal to zero are dummies/fillers
# and can therefore be ignored.
and int(waveform_info[index]["play_config"])
]
nodes_str = ",".join(nodes)
waveforms_raw = self._daq_server.get(nodes_str, settingsonly=False, flat=True)
waveforms = Waveforms()
for node, waveform in waveforms_raw.items():
slot = int(node.rsplit("/", 1)[-1])
waveforms.assign_native_awg_waveform(
slot,
waveform[0]["vector"],
channels=int(waveform_info[slot].get("channels", 1)),
markers_present=bool(int(waveform_info[slot].get("marker_bits")[0])),
)
return waveforms
@lazy_property
def commandtable(self) -> t.Optional[CommandTableNode]:
"""Command table module."""
if self["commandtable"].is_valid():
return CommandTableNode(
self._root, self._tree + ("commandtable",), self._device_type
)
return None | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/nodes/awg.py | awg.py |
import logging
import typing as t
import zhinst.utils.shfqa as utils
from zhinst.core import ziDAQServer
from zhinst.toolkit.nodetree import Node, NodeTree
logger = logging.getLogger(__name__)
class SHFScope(Node):
"""SHFQA Scope Node.
Implements basic functionality of the scope node, e.g. allowing the user to
read the data.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
daq_server: Instance of the ziDAQServer
serial: Serial of the device.
"""
def __init__(
self,
root: NodeTree,
tree: tuple,
daq_server: ziDAQServer,
serial: str,
):
super().__init__(root, tree)
self._daq_server = daq_server
self._serial = serial
def run(
self, *, single: bool = True, timeout: float = 10, sleep_time: float = 0.005
) -> None:
"""Run the scope recording.
Args:
single: Flag if the scope should operate in single-shot mode.
(default = True)
timeout: The maximum waiting time in seconds for the Scope
(default = 10).
sleep_time: Time in seconds to wait between requesting the progress
and records values (default = 0.005).
Raises:
TimeoutError: The scope did not start within the specified
timeout.
"""
self.single(single)
self.enable(True)
try:
self.enable.wait_for_state_change(1, timeout=timeout, sleep_time=sleep_time)
except TimeoutError as error:
raise TimeoutError(
"The scope could not be started within "
f"the specified timeout ({timeout}s)"
) from error
def stop(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Stop the scope recording.
Args:
timeout: The maximum waiting time in seconds for the scope
(default = 10).
sleep_time: Time in seconds to wait between requesting the progress
and records values (default = 0.005).
Raises:
TimeoutError: The scope did not stop within the specified
timeout.
"""
self.enable(False)
try:
self.enable.wait_for_state_change(0, timeout=timeout, sleep_time=sleep_time)
except TimeoutError as error:
raise TimeoutError(
"The scope could not be stopped within "
f"the specified timeout ({timeout}s)"
) from error
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.005) -> None:
"""Wait until the scope recording is finished.
Args:
timeout: The maximum waiting time in seconds for the Scope
(default = 10).
sleep_time: Time in seconds to wait between requesting the progress
and records values (default = 0.005).
Raises:
TimeoutError: The scope did not finish within the specified
timeout.
"""
try:
self.enable.wait_for_state_change(0, timeout=timeout, sleep_time=sleep_time)
except TimeoutError as error:
raise TimeoutError(
"Scope recording did not finish "
f"within the specified timeout ({timeout}s)."
) from error
def configure(
self,
*,
input_select: t.Dict[int, str],
num_samples: int,
trigger_input: str,
num_segments: int = 1,
num_averages: int = 1,
trigger_delay: float = 0,
) -> None:
"""Configures the scope for a measurement.
Args:
input_select: Map of scope channels to their signal
sources, e.g. "channel0_signal_input". (For a list of available
values use `available_inputs`)
num_samples: Number of samples to be recorded in a scope shot.
trigger_input: Specifies the trigger source of the scope
acquisition - if set to None, the self-triggering mode of the
scope becomes active, which is useful e.g. for the GUI.
For a list of available trigger values use
`available_trigger_inputs`.
num_segments: Number of distinct scope shots to be returned after
ending the acquisition.
num_averages: Specifies how many times each segment should be
averaged on hardware; to finish a scope acquisition, the number
of issued triggers must be equal to num_segments * num_averages.
trigger_delay: delay in samples specifying the time between the
start of data acquisition and reception of a trigger.
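Example:
A minimal sketch; the signal and trigger aliases are placeholders,
use `available_inputs` and `available_trigger_inputs` for the
values supported by the device:
>>> scope.configure(
...     input_select={0: "channel0_signal_input"},
...     num_samples=2 ** 12,
...     trigger_input="channel0_trigger_input0",
... )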
"""
utils.configure_scope(
self._daq_server,
self._serial,
input_select=input_select,
num_samples=num_samples,
trigger_input=trigger_input,
num_segments=num_segments,
num_averages=num_averages,
trigger_delay=trigger_delay,
)
def read(
self,
*,
timeout: float = 10,
) -> tuple:
"""Read out the recorded data from the scope.
Args:
timeout: The maximum waiting time in seconds for the
Scope (default: 10).
Returns:
(recorded_data, recorded_data_range, scope_time)
Raises:
TimeoutError: If the scope recording is not completed before
timeout.
"""
return utils.get_scope_data(self._daq_server, self._serial, timeout=timeout)
@property
def available_trigger_inputs(self) -> t.List[str]:
"""List of the available trigger sources for the scope."""
return [
option.enum for option in self.trigger.channel.node_info.options.values()
]
@property
def available_inputs(self) -> t.List[str]:
"""List of the available signal sources for the scope channels."""
return [
option.enum
for option in self.channels[0].inputselect.node_info.options.values()
] | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/nodes/shfqa_scope.py | shfqa_scope.py |
import typing as t
import numpy as np
import zhinst.utils.shfqa.multistate as utils
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.node import NodeList
from zhinst.toolkit.nodetree.helper import (
lazy_property,
create_or_append_set_transaction,
)
class Qudit(Node):
"""Single Qudit node.
Implements basic functionality of a single qudit node, e.g. applying the
basic configuration.
Args:
root: Root of the nodetree.
tree: Tree (node path as tuple) of the current node.
serial: Serial of the device.
readout_channel: Index of the readout channel this qudit belongs to.
"""
def __init__(
self,
root: NodeTree,
tree: tuple,
serial: str,
readout_channel: int,
):
super().__init__(root, tree)
self._daq_server = root.connection
self._serial = serial
self._readout_channel = readout_channel
def configure(
self,
qudit_settings: utils.QuditSettings,
enable: bool = True,
) -> None:
"""Compiles a list of transactions to apply the qudit settings to the device.
Args:
qudit_settings: The qudit settings to be configured.
enable: Whether to enable the qudit. (default: True)
"""
settings = utils.get_settings_transaction(
self._serial,
self._readout_channel,
int(self._tree[-1]),
qudit_settings,
enable=enable,
)
with create_or_append_set_transaction(self._root):
for node, value in settings:
self._root.transaction.add(node, value)
class MultiState(Node):
"""MultiState node.
Implements basic functionality of the MultiState node.
Args:
root: Root of the nodetree.
tree: Tree (node path as tuple) of the current node.
serial: Serial of the device.
index: Index of the corresponding readout channel.
"""
def __init__(
self,
root: NodeTree,
tree: tuple,
serial: str,
index: int,
):
super().__init__(root, tree)
self._daq_server = root.connection
self._serial = serial
self._index = index
def get_qudits_results(self) -> t.Dict[int, np.ndarray]:
"""Downloads the qudit results from the device and group them by qudit.
This function accesses the multistate nodes to determine which
integrators were used for which qudit to be able to group the results by
qudit.
Returns:
A dictionary with the qudit index keys and result vector values.
"""
return utils.get_qudits_results(
self._daq_server,
self._serial,
self._index,
)
@lazy_property
def qudits(self) -> t.Sequence[Qudit]:
"""A Sequence of Qudits."""
return NodeList(
[
Qudit(
self._root,
self._tree + ("qudits", str(i)),
self._serial,
self._index,
)
for i in range(len(self["qudits"]))
],
self._root,
self._tree + ("qudits",),
) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/nodes/multistate.py | multistate.py |
import json
import string
import typing as t
from pathlib import Path
from zhinst.toolkit.command_table import CommandTable
from zhinst.toolkit.nodetree import Node, NodeTree
_CT_RESOURCE_PATH = Path(__file__).parent.parent.parent / "resources"
_CT_FILES = {
"shfqc": _CT_RESOURCE_PATH / "ct_schema_shfsg.json",
"shfsg": _CT_RESOURCE_PATH / "ct_schema_shfsg.json",
"hdawg": _CT_RESOURCE_PATH / "ct_schema_hdawg.json",
}
class CommandTableNode(Node):
"""CommandTable node.
This class implements the basic functionality of the command table allowing
the user to load and upload their own command table.
A dedicated class called ``CommandTable`` exists that is the preferred way
to create a valid command table. For more information about the
``CommandTable`` refer to the corresponding example or the documentation
of that class directly.
Args:
root: Node used for the upload of the command table
tree: Tree (node path as tuple) of the current node
device_type: Device type.
"""
def __init__(
self, root: NodeTree, tree: t.Tuple[str, ...], device_type: str
) -> None:
Node.__init__(self, root, tree)
self._device_type = device_type
self._schema: t.Optional[t.Dict[str, t.Any]] = None
def check_status(self) -> bool:
"""Check status of the command table.
Returns:
Flag if a valid command table is loaded into the device.
Raises:
RuntimeError: If the command table upload into the device failed.
"""
ct_status = self.status()
if ct_status >> 3:
raise RuntimeError(
"Uploading of data to the command table failed "
"due to a JSON parsing error."
)
return ct_status == 1
def load_validation_schema(self) -> t.Dict[str, t.Any]:
"""Load device command table validation schema.
Returns:
JSON validation schema for the device command tables.
"""
if self._schema is None:
try:
self._schema = json.loads(self.schema())
except KeyError:
device_type_stripped = self._device_type.lower().rstrip(string.digits)
with open(_CT_FILES[device_type_stripped], encoding="utf-8") as file_:
self._schema = json.load(file_)
return self._schema # type: ignore
def upload_to_device(
self,
ct: t.Union[CommandTable, str, dict],
*,
validate: bool = False,
check_upload: bool = True,
) -> None:
"""Upload command table into the device.
The command table can either be specified through the dedicated
``CommandTable`` class or in a raw format, meaning a json string or json
dict. In the case of a json string or dict the command table is
validated by default against the schema provided by the device.
Args:
ct: Command table.
validate: Flag if the command table should be validated. (Only
applies if the command table is passed as a raw json string or
json dict)
check_upload: Flag if the upload should be validated by calling
`check_status`. This is not mandatory but strongly recommended
since the device does not raise an error when it rejects the
command table. This flag is ignored when called from within a
transaction.
Raises:
RuntimeError: If the command table upload into the device failed.
zhinst.toolkit.exceptions.ValidationError: Incorrect schema.
.. versionchanged:: 0.4.2
New flag `check_upload` that makes the upload check optional.
`check_status` is only called when not in an ongoing transaction.
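Example:
A minimal sketch building a table with the ``CommandTable`` class;
the table entry values are placeholders:
>>> ct = CommandTable(awg.commandtable.load_validation_schema())
>>> ct.table[0].waveform.index = 0
>>> awg.commandtable.upload_to_device(ct)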
"""
try:
self.data(json.dumps(ct.as_dict())) # type: ignore
except AttributeError:
if validate:
ct_new = CommandTable(self.load_validation_schema())
ct_new.update(ct)
self.upload_to_device(ct_new)
elif isinstance(ct, str):
self.data(ct)
else:
self.data(json.dumps(ct))
if (
check_upload
and not self._root.transaction.in_progress()
and not self.check_status()
):
raise RuntimeError(
"No valid command table reported by the device after upload."
)
def load_from_device(self) -> CommandTable:
"""Load command table from the device.
Returns:
command table.
"""
ct = CommandTable(self.load_validation_schema(), active_validation=True)
ct.update(self.data())
return ct | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/nodes/command_table_node.py | command_table_node.py |
import logging
import typing as t
import numpy as np
import zhinst.utils.shfqa as utils
from zhinst.toolkit.exceptions import ToolkitError
from zhinst.toolkit.interface import AveragingMode
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.helper import lazy_property
from zhinst.toolkit.waveform import Waveforms
from zhinst.toolkit.driver.nodes.multistate import MultiState
logger = logging.getLogger(__name__)
class Readout(Node):
"""Readout node.
Implements basic functionality of the readout, e.g. allowing the user to
write the integration weight.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
serial: Serial of the device.
index: Index of the corresponding awg channel
max_qubits_per_channel: Max qubits per channel
"""
def __init__(
self,
root: NodeTree,
tree: tuple,
serial: str,
index: int,
max_qubits_per_channel: int,
):
super().__init__(root, tree)
self._daq_server = root.connection
self._serial = serial
self._index = index
self._max_qubits_per_channel = max_qubits_per_channel
def configure_result_logger(
self,
*,
result_source: str,
result_length: int,
num_averages: int = 1,
averaging_mode: AveragingMode = AveragingMode.CYCLIC,
) -> None:
"""Configures the result logger for readout mode.
Args:
result_source: String-based tag to select the result source in readout
mode, e.g. "result_of_integration" or "result_of_discrimination".
result_length: Number of results to be returned by the result logger
num_averages: Number of averages, will be rounded to 2^n
averaging_mode: Select the averaging order of the result, with
0 = cyclic and 1 = sequential.
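Example:
A minimal sketch, assuming ``readout`` refers to
``device.qachannels[0].readout`` of a connected SHFQA:
>>> readout.configure_result_logger(
...     result_source="result_of_integration", result_length=1024
... )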
"""
utils.configure_result_logger_for_readout(
self._daq_server,
self._serial,
self._index,
result_source=result_source,
result_length=result_length,
num_averages=num_averages,
averaging_mode=int(averaging_mode),
)
def run(self) -> None:
"""Reset and enable the result logger."""
utils.enable_result_logger(
self._daq_server,
self._serial,
self._index,
mode="readout",
)
def stop(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Stop the result logger.
Args:
timeout: The maximum waiting time in seconds for the Readout
(default: 10).
sleep_time: Sleep interval in seconds. (default = 0.05)
Raises:
TimeoutError: If the result logger could not be stopped within the
given time.
"""
self.result.enable(False)
try:
self.result.enable.wait_for_state_change(
0, timeout=timeout, sleep_time=sleep_time
)
except TimeoutError as error:
raise TimeoutError(
f"{repr(self)}: The result logger could not be stopped "
f"within the specified timeout ({timeout}s)."
) from error
def wait_done(self, *, timeout: float = 10, sleep_time: float = 0.05) -> None:
"""Wait until the readout is finished.
Args:
timeout: The maximum waiting time in seconds for the Readout
(default: 10).
sleep_time: Sleep interval in seconds. (default = 0.05)
Raises:
TimeoutError: If the readout recording is not completed within the
given time.
"""
try:
self.result.enable.wait_for_state_change(
0, timeout=timeout, sleep_time=sleep_time
)
except TimeoutError as error:
raise TimeoutError(
f"{repr(self)}: The readout did not finish "
f"within the specified timeout ({timeout}s)."
) from error
def read(
self,
*,
timeout: float = 10,
) -> np.ndarray:
"""Waits until the logger finished recording and returns the measured data.
Args:
timeout: Maximum time to wait for data in seconds (default = 10s)
Returns:
Result logger data.
"""
return utils.get_result_logger_data(
self._daq_server, self._serial, self._index, mode="readout", timeout=timeout
)
def write_integration_weights(
self,
weights: t.Union[Waveforms, dict],
*,
integration_delay: float = 0.0,
clear_existing: bool = True,
) -> None:
"""Configures the weighted integration.
Args:
weights: Dictionary containing the complex weight vectors, where
keys correspond to the indices of the integration units to be
configured.
integration_delay: Delay in seconds before starting the readout.
clear_existing: Flag whether to clear the waveform memory before
the present upload.
"""
if (
len(weights.keys()) > 0
and max(weights.keys()) >= self._max_qubits_per_channel
):
raise ToolkitError(
f"The device only has {self._max_qubits_per_channel} qubits per channel"
f", but {max(weights.keys())} were specified."
)
waveform_dict = {}
if isinstance(weights, Waveforms):
for slot in weights.keys():
waveform_dict[slot] = weights.get_raw_vector(slot, complex_output=True)
else:
waveform_dict = weights
utils.configure_weighted_integration(
self._daq_server,
self._serial,
self._index,
weights=waveform_dict,
integration_delay=integration_delay,
clear_existing=clear_existing,
)
def read_integration_weights(self, slots: t.Optional[t.List[int]] = None) -> Waveforms:
"""Read integration weights from the waveform memory.
Args:
slots: List of weight slots to read from the device. If not specified
all available weights will be downloaded.
Returns:
Mutable mapping of the downloaded weights.
"""
nodes = []
if slots is not None:
for slot in slots:
nodes.append(self.integration.weights[slot].wave.node_info.path)
else:
nodes.append(self.integration.weights["*"].wave.node_info.path)
nodes_str = ",".join(nodes)
weights_raw = self._daq_server.get(nodes_str, settingsonly=False, flat=True)
weights = Waveforms()
for slot, weight in enumerate(weights_raw.values()):
weights[slot] = weight[0]["vector"]
return weights
@lazy_property
def multistate(self) -> MultiState:
"""Multistate discrimination node tree branch."""
return MultiState(
self._root, self._tree + ("multistate",), self._serial, self._index
) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/nodes/readout.py | readout.py |
import logging
import typing as t
import zhinst.utils.shfqa as utils
from zhinst.toolkit.driver.devices.base import BaseInstrument
from zhinst.toolkit.driver.nodes.awg import AWG
from zhinst.toolkit.driver.nodes.readout import Readout
from zhinst.toolkit.driver.nodes.shfqa_scope import SHFScope
from zhinst.toolkit.driver.nodes.spectroscopy import Spectroscopy
from zhinst.toolkit.exceptions import ToolkitError
from zhinst.toolkit.interface import SHFQAChannelMode
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.helper import (
lazy_property,
create_or_append_set_transaction,
)
from zhinst.toolkit.nodetree.node import NodeList
from zhinst.toolkit.waveform import Waveforms
logger = logging.getLogger(__name__)
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.session import Session
class Generator(AWG):
"""Generator node.
Implements basic functionality of the generator allowing the user to write
and upload their *'.seqC'* code.
In contrast to other AWG Sequencers, e.g. from the HDAWG, SHFSG
it does not provide writing access to the Waveform Memories
and hence does not come with predefined waveforms such as `gauss`
or `ones`. Therefore, all waveforms need to be defined in Python
and uploaded to the device using `upload_waveforms` method.
Args:
root: Root of the nodetree
tree: Tree (node path as tuple) of the current node
serial: Serial of the device.
index: Index of the corresponding awg channel
max_qubits_per_channel: Max qubits per channel
device_type: Device type
device_options: Device options
"""
def __init__(
self,
root: NodeTree,
tree: tuple,
serial: str,
index: int,
max_qubits_per_channel: int,
device_type: str,
device_options: str,
):
super().__init__(root, tree, serial, index, device_type, device_options)
self._max_qubits_per_channel = max_qubits_per_channel
def write_to_waveform_memory(
self, pulses: t.Union[Waveforms, dict], *, clear_existing: bool = True
) -> None:
"""Writes pulses to the waveform memory.
Args:
pulses: Waveforms that should be uploaded.
clear_existing: Flag whether to clear the waveform memory before the
present upload. (default = True)
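Example:
A minimal sketch uploading a single complex pulse to slot 0 (the
pulse shape is a placeholder):
>>> import numpy as np
>>> pulses = Waveforms()
>>> pulses[0] = np.exp(1j * np.linspace(0, np.pi, 2048))
>>> generator.write_to_waveform_memory(pulses)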
"""
if (
len(pulses.keys()) > 0
and max(pulses.keys()) >= self._max_qubits_per_channel
):
raise ToolkitError(
f"The device only has {self._max_qubits_per_channel} qubits per channel"
f", but {max(pulses.keys())} were specified."
)
with create_or_append_set_transaction(self._root):
if clear_existing:
self.clearwave(1)
if isinstance(pulses, Waveforms):
for slot in pulses.keys():
self.waveforms[slot].wave(
pulses.get_raw_vector(slot, complex_output=True)
)
else:
for slot, waveform in pulses.items():
self.waveforms[slot].wave(waveform)
def read_from_waveform_memory(self, slots: t.Optional[t.List[int]] = None) -> Waveforms:
"""Read pulses from the waveform memory.
Args:
slots: List of waveform indexes to read from the device. If not
specified all assigned waveforms will be downloaded.
Returns:
Mutable mapping of the downloaded waveforms.
"""
nodes = []
if slots is not None:
for slot in slots:
nodes.append(self.waveforms[slot].wave.node_info.path)
else:
nodes.append(self.waveforms["*"].wave.node_info.path)
nodes_str = ",".join(nodes)
waveforms_raw = self._daq_server.get(nodes_str, settingsonly=False, flat=True)
waveforms = Waveforms()
for slot, waveform in enumerate(waveforms_raw.values()):
waveforms[slot] = waveform[0]["vector"]
return waveforms
def configure_sequencer_triggering(
self, *, aux_trigger: str, play_pulse_delay: float = 0.0
) -> None:
"""Configure the sequencer triggering.
Args:
aux_trigger: Alias for the trigger source used in the sequencer.
For the list of available values, use `available_aux_trigger_inputs`
play_pulse_delay: Delay in seconds before the start of waveform playback.
"""
# Only Digital Trigger 1
utils.configure_sequencer_triggering(
self._daq_server,
self._serial,
self._index,
aux_trigger=aux_trigger,
play_pulse_delay=play_pulse_delay,
)
@property
def available_aux_trigger_inputs(self) -> t.List[str]:
"""List of available aux trigger sources for the generator."""
return [
option.enum
for option in self.auxtriggers[0].channel.node_info.options.values()
]
class QAChannel(Node):
"""Quantum Analyzer Channel for the SHFQA.
:class:`QAChannel` implements basic functionality to configure QAChannel
settings of the :class:`SHFQA` instrument.
Besides the :class:`Generator`, :class:`Readout` and :class:`Sweeper`
modules it also provides easy access to commonly used `QAChannel` parameters.
Args:
device: SHFQA device object.
session: Underlying session.
tree: Node tree (node path as tuple) of the corresponding node.
"""
def __init__(
self,
device: "SHFQA",
session: "Session",
tree: t.Tuple[str, ...],
):
super().__init__(device.root, tree)
self._index = int(tree[-1])
self._device = device
self._serial = device.serial
self._session = session
def configure_channel(
self,
*,
input_range: int,
output_range: int,
center_frequency: float,
mode: SHFQAChannelMode,
) -> None:
"""Configures the RF input and output of a specified channel.
Args:
input_range: Maximal range of the signal input power in dBm
output_range: Maximal range of the signal output power in dBm
center_frequency: Center frequency of the analysis band [Hz]
mode: Select between spectroscopy and readout mode.
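Example:
A minimal sketch; the range and frequency values are placeholders:
>>> qachannel.configure_channel(
...     input_range=0,
...     output_range=-5,
...     center_frequency=5e9,
...     mode=SHFQAChannelMode.READOUT,
... )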
"""
utils.configure_channel(
self._session.daq_server,
self._serial,
self._index,
input_range=input_range,
output_range=output_range,
center_frequency=center_frequency,
mode=mode.value,
)
@lazy_property
def generator(self) -> Generator:
"""Generator."""
return Generator(
self._root,
self._tree + ("generator",),
self._device.serial,
self._index,
self._device.max_qubits_per_channel,
self._device.device_type,
self._device.device_options,
)
@lazy_property
def readout(self) -> Readout:
"""Readout."""
return Readout(
self._root,
self._tree + ("readout",),
self._device.serial,
self._index,
self._device.max_qubits_per_channel,
)
@lazy_property
def spectroscopy(self) -> Spectroscopy:
"""Spectroscopy."""
return Spectroscopy(
self._root,
self._tree + ("spectroscopy",),
self._device.serial,
self._index,
)
class SHFQA(BaseInstrument):
"""High-level driver for the Zurich Instruments SHFQA."""
def start_continuous_sw_trigger(
self, *, num_triggers: int, wait_time: float
) -> None:
"""Issues a specified number of software triggers.
Issues a specified number of software triggers with a certain wait time
in between. The function guarantees reception and proper processing of
all triggers by the device, but the time between triggers is
non-deterministic by nature of software triggering. Only use this
function for prototyping and/or cases without strong timing requirements.
Args:
num_triggers: Number of triggers to be issued
wait_time: Time between triggers in seconds
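Example:
A minimal sketch issuing ten triggers spaced roughly 2 ms apart:
>>> device.start_continuous_sw_trigger(num_triggers=10, wait_time=2e-3)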
"""
utils.start_continuous_sw_trigger(
self._session.daq_server,
self.serial,
num_triggers=num_triggers,
wait_time=wait_time,
)
@lazy_property
def max_qubits_per_channel(self) -> int:
"""Maximum number of supported qubits per channel."""
return utils.max_qubits_per_channel(self._session.daq_server, self.serial)
@lazy_property
def qachannels(self) -> t.Sequence[QAChannel]:
"""A Sequence of QAChannels."""
return NodeList(
[
QAChannel(self, self._session, self._tree + ("qachannels", str(i)))
for i in range(len(self["qachannels"]))
],
self._root,
self._tree + ("qachannels",),
)
@lazy_property
def scopes(self) -> t.Sequence[SHFScope]:
"""A Sequence of SHFScopes."""
return NodeList(
[
SHFScope(
self._root,
self._tree + ("scopes", str(i)),
self._session.daq_server,
self.serial,
)
for i in range(len(self["scopes"]))
],
self._root,
self._tree + ("scopes",),
) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/devices/shfqa.py | shfqa.py |
import logging
import typing as t
import zhinst.utils.shfsg as utils
from zhinst.toolkit.driver.devices.base import BaseInstrument
from zhinst.toolkit.driver.nodes.awg import AWG
from zhinst.toolkit.nodetree import Node
from zhinst.toolkit.nodetree.helper import lazy_property
from zhinst.toolkit.nodetree.node import NodeList
logger = logging.getLogger(__name__)
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.session import Session
class AWGCore(AWG):
"""AWG Core Node."""
def configure_marker_and_trigger(
self,
*,
trigger_in_source: str,
trigger_in_slope: str,
marker_out_source: str,
) -> None:
"""Configures the trigger inputs and marker outputs of the AWG.
Args:
trigger_in_source: Alias for the trigger input used by the
sequencer. For a list of available values use:
`available_trigger_inputs`
trigger_in_slope: Alias for the slope of the input trigger
used by sequencer. For a list of available values use
`available_trigger_slopes`
marker_out_source: Alias for the marker output source used by
the sequencer. For a list of available values use
`available_marker_outputs`
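Example:
A minimal sketch; the alias strings are placeholders, use the
`available_*` properties below for the values supported by the
device:
>>> awg.configure_marker_and_trigger(
...     trigger_in_source="trigin0",
...     trigger_in_slope="rising_edge",
...     marker_out_source="awg_trigger0",
... )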
"""
utils.configure_marker_and_trigger(
self._daq_server,
self._serial,
self._index,
trigger_in_source=trigger_in_source,
trigger_in_slope=trigger_in_slope,
marker_out_source=marker_out_source,
)
@property
def available_trigger_inputs(self) -> t.List[str]:
"""List the available trigger sources for the sequencer."""
return [
option.enum
for option in self.auxtriggers[0].channel.node_info.options.values()
]
@property
def available_trigger_slopes(self) -> t.List[str]:
"""List the available trigger slopes for the sequencer."""
return [
option.enum
for option in self.auxtriggers[0].slope.node_info.options.values()
]
@property
def available_marker_outputs(self) -> t.List[str]:
"""List the available trigger marker outputs for the sequencer."""
return [
option.enum
for option in self.root.sgchannels[
self._index
].marker.source.node_info.options.values()
]
class SGChannel(Node):
"""Signal Generator Channel for the SHFSG.
:class:`SGChannel` implements basic functionality to configure SGChannel
settings of the :class:`SHFSG` instrument.
Args:
device: SHFSG device object.
session: Underlying session.
tree: Node tree (node path as tuple) of the corresponding node.
"""
def __init__(
self,
device: "SHFSG",
session: "Session",
tree: t.Tuple[str, ...],
):
super().__init__(device.root, tree)
self._index = int(tree[-1])
self._device = device
self._serial = device.serial
self._session = session
def configure_channel(
self,
*,
enable: bool,
output_range: int,
center_frequency: float,
rf_path: bool,
) -> None:
"""Configures the RF input and output.
Args:
enable: Flag if the signal output should be enabled.
output_range: Maximal range of the signal output power in dBm
center_frequency: Center frequency before modulation
rf_path: Flag if the RF (True) or LF (False) path should be
configured.
"""
utils.configure_channel(
self._session.daq_server,
self._device.serial,
self._index,
enable=int(enable),
output_range=output_range,
center_frequency=center_frequency,
rflf_path=int(rf_path),
)
def configure_pulse_modulation(
self,
*,
enable: bool,
osc_index: int = 0,
osc_frequency: float = 100e6,
phase: float = 0.0,
global_amp: float = 0.5,
gains: tuple = (1.0, -1.0, 1.0, 1.0),
sine_generator_index: int = 0,
) -> None:
"""Configure the pulse modulation.
Configures the sine generator to digitally modulate the AWG output, for
generating single sideband AWG signals
Args:
enable: Flag if the modulation should be enabled.
osc_index: Selects which oscillator to use
osc_frequency: Oscillator frequency used to modulate the AWG
outputs. (default = 100e6)
phase: Sets the oscillator phase. (default = 0.0)
global_amp: Global scale factor for the AWG outputs. (default = 0.5)
gains: Sets the four amplitudes used for single sideband generation.
Default values correspond to upper sideband with a positive
oscillator frequency. (default = (1.0, -1.0, 1.0, 1.0))
sine_generator_index: Selects which sine generator to use on a
given channel.
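Example:
A minimal sketch enabling modulation with oscillator 0 at 100 MHz
(the remaining parameters keep their documented defaults):
>>> sgchannel.configure_pulse_modulation(
...     enable=True, osc_index=0, osc_frequency=100e6
... )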
"""
utils.configure_pulse_modulation(
self._session.daq_server,
self._device.serial,
self._index,
enable=int(enable),
osc_index=osc_index,
osc_frequency=osc_frequency,
phase=phase,
global_amp=global_amp,
gains=gains,
sine_generator_index=sine_generator_index,
)
def configure_sine_generation(
self,
*,
enable: bool,
osc_index: int = 0,
osc_frequency: float = 100e6,
phase: float = 0.0,
gains: tuple = (0.0, 1.0, 1.0, 0.0),
sine_generator_index: int = 0,
) -> None:
"""Configures the sine generator output.
Configures the sine generator output of a specified channel for generating
continuous wave signals without the AWG.
Args:
enable: Flag if the sine generator output should be enabled.
osc_index: Selects which oscillator to use
osc_frequency: Oscillator frequency used by the sine generator
(default = 100e6)
phase: Sets the oscillator phase. (default = 0.0)
gains: Sets the four amplitudes used for single sideband
generation. Default values correspond to upper sideband with a
positive oscillator frequency.
Gains are set in the following order I/sin, I/cos, Q/sin, Q/cos.
(default = (0.0, 1.0, 1.0, 0.0))
sine_generator_index: Selects which sine generator to use on a given
channel
"""
utils.configure_sine_generation(
self._session.daq_server,
self._device.serial,
self._index,
enable=int(enable),
osc_index=osc_index,
osc_frequency=osc_frequency,
phase=phase,
gains=gains,
sine_generator_index=sine_generator_index,
)
@property
def awg_modulation_freq(self) -> float:
"""Modulation frequency of the AWG.
Depends on the selected oscillator.
"""
selected_osc = self.sines[0].oscselect()
return self.oscs[selected_osc].freq()
@lazy_property
def awg(self) -> AWGCore:
"""AWG."""
return AWGCore(
self._root,
self._tree + ("awg",),
self._device.serial,
self._index,
self._device.device_type,
self._device.device_options,
)
class SHFSG(BaseInstrument):
"""High-level driver for the Zurich Instruments SHFSG."""
@lazy_property
def sgchannels(self) -> t.Sequence[SGChannel]:
"""A Sequence of SG Channels."""
return NodeList(
[
SGChannel(self, self._session, self._tree + ("sgchannels", str(i)))
for i in range(len(self["sgchannels"]))
],
self._root,
self._tree + ("sgchannels",),
) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/devices/shfsg.py | shfsg.py |
import copy
import json
import logging
import re
import typing as t
import warnings
from pathlib import Path
from zhinst.utils._version import version as utils_version_str
from zhinst.core import __version__ as zhinst_version_str
from zhinst.toolkit._min_version import _MIN_DEVICE_UTILS_VERSION, _MIN_LABONE_VERSION
from zhinst.toolkit.driver.parsers import node_parser
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.helper import lazy_property
from zhinst.toolkit.exceptions import ToolkitError
logger = logging.getLogger(__name__)
if t.TYPE_CHECKING: # pragma: no cover
from zhinst.toolkit.session import Session
class BaseInstrument(Node):
"""Generic toolkit driver for a Zurich Instrument device.
All device specific class are derived from this class.
It exposes the nodetree and also implements common functions valid for all
devices.
It also can be used directly, e.g. for instrument types that have no special
class in toolkit.
Args:
serial: Serial number of the device, e.g. *'dev12000'*.
The serial number can be found on the back panel of the instrument.
device_type: Type of the device.
session: Session to the Data Server
"""
def __init__(
self,
serial: str,
device_type: str,
session: "Session",
):
self._serial = serial
self._device_type = device_type
self._session = session
try:
self._options = session.daq_server.getString(f"/{serial}/features/options")
except RuntimeError:
self._options = ""
# HF2 does not support listNodesJSON so we have the information hardcoded
# (the nodes of HF2 will not change any more so this is safe)
preloaded_json = None
if "HF2" in self._device_type:
preloaded_json = self._load_preloaded_json(
Path(__file__).parent / "../../resources/nodedoc_hf2.json"
)
self._streaming_nodes: t.Optional[t.List[Node]] = None
nodetree = NodeTree(
self._session.daq_server,
prefix_hide=self._serial,
list_nodes=[f"/{self._serial}/*"],
preloaded_json=preloaded_json,
)
# Add predefined parseres (in node_parser) to nodetree nodes
nodetree.update_nodes(
node_parser.get(self.__class__.__name__, {}), raise_for_invalid_node=False
)
super().__init__(nodetree, tuple())
def __repr__(self):
options = f"({self._options})" if self._options else ""
options = options.replace("\n", ",")
return str(
f"{self.__class__.__name__}({self._device_type}" f"{options},{self.serial})"
)
def factory_reset(self, *, deep: bool = True, timeout: int = 30) -> None:
"""Load the factory default settings.
Args:
deep: A flag that specifies if a synchronization
should be performed between the device and the data
server after loading the factory preset (default: True).
timeout: Timeout in seconds to wait for the factory reset to
complete.
Raises:
ToolkitError: If the factory preset could not be loaded.
TimeoutError: If the factory reset did not complete within the
given timeout.
"""
self.system.preset.load(1, deep=deep)
self.system.preset.busy.wait_for_state_change(0, timeout=timeout)
if self.system.preset.error(deep=True)[1]:
raise ToolkitError(
f"Failed to load factory preset to device {self.serial.upper()}."
)
logger.info(f"Factory preset is loaded to device {self.serial.upper()}.")
@staticmethod
def _version_string_to_tuple(version: str) -> t.Tuple[int, int, int]:
"""Converts a version string into a version tuple.
Args:
version: Version
Returns:
Version as a tuple of ints
"""
result = [0] * 3
for i, value in enumerate(version.split(".")):
try:
result[i] = int(value)
except ValueError:
if i < 3: # ignore dev versions
result[i] = 0
return result[0], result[1], result[2]
@staticmethod
def _check_python_versions(
zi_python_version: t.Tuple[int, int, int],
zi_utils_version: t.Tuple[int, int, int],
) -> None:
"""Check if the minimum required zhinst packages are installed.
Checks if all zhinst packages that toolkit require have the minimum
required version installed.
Args:
zi_python_version: zhinst.core package version
zi_utils_version: zhinst.utils package version
Raises:
ToolkitError: If the zhinst.core version does not match the
minimum requirements for zhinst.toolkit
ToolkitError: If the zhinst.utils version does not match the
minimum requirements for zhinst.toolkit
"""
if zi_python_version < BaseInstrument._version_string_to_tuple(
_MIN_LABONE_VERSION
):
raise ToolkitError(
"zhinst.core version does not match the minimum required version "
f"for zhinst.toolkit {zi_python_version} < {_MIN_LABONE_VERSION}. "
"Use `pip install --upgrade zhinst` to get the latest version."
)
if zi_utils_version < BaseInstrument._version_string_to_tuple(
_MIN_DEVICE_UTILS_VERSION
):
raise ToolkitError(
"zhinst.utils version does not match the minimum required "
f"version for zhinst.toolkit {zi_utils_version} < "
f"{_MIN_DEVICE_UTILS_VERSION}. Use `pip install "
"--upgrade zhinst.utils` to get the latest version."
)
@staticmethod
def _check_labone_version(
zi_python_version: t.Tuple[int, int, int],
labone_version: t.Tuple[int, int, int],
) -> None:
"""Check that the LabOne version matches the zhinst version.
Args:
zi_python_version: zhinst.core package version
labone_version: LabOne DataServer version
Raises:
ToolkitError: If the zhinst.core version does not match the
version of the connected LabOne DataServer.
"""
if labone_version[:2] < zi_python_version[:2]:
raise ToolkitError(
"The LabOne version is smaller than the zhinst.core version. "
f"{labone_version} < {zi_python_version}. "
"Please install the latest/matching LabOne version from "
"https://www.zhinst.com/support/download-center."
)
if labone_version[:2] > zi_python_version[:2]:
raise ToolkitError(
"The zhinst.core version is smaller than the LabOne version "
f"{zi_python_version} < {labone_version}. "
"Please install the latest/matching version from pypi.org."
)
if labone_version[-1] != zi_python_version[-1]:
warnings.warn(
"The patch version of zhinst.core and the LabOne DataServer "
f"mismatch {labone_version[-1]} != {zi_python_version[-1]}.",
RuntimeWarning,
stacklevel=2,
)
def _check_firmware_update_status(self) -> None:
"""Check if the firmware matches LabOne version.
Raises:
ConnectionError: If the device is currently updating
ToolkitError: If the firmware revision does not match to the
version of the connected LabOne DataServer.
"""
device_info = json.loads(self._session.daq_server.getString("/zi/devices"))[
self.serial.upper()
]
status_flag = device_info["STATUSFLAGS"]
if status_flag & 1 << 8:
raise ConnectionError(
"The device is currently updating, please try again after the update "
"process is complete"
)
if status_flag & 1 << 4 or status_flag & 1 << 5:
raise ToolkitError(
"The Firmware does not match the LabOne version. "
"Please update the firmware (e.g. in the LabOne UI)"
)
if status_flag & 1 << 6 or status_flag & 1 << 7:
raise ToolkitError(
"The Firmware does not match the LabOne version. "
"Please update LabOne to the latest version from "
"https://www.zhinst.com/support/download-center."
)
def check_compatibility(self) -> None:
"""Check if the software stack is compatible.
Stability can only be ensured if all versions and revisions of the
software stack match. The following criteria are checked:
* minimum required zhinst-utils package is installed
* minimum required zhinst-core package is installed
* zhinst package matches the LabOne Data Server version
* firmware revision matches the LabOne Data Server version
Raises:
ConnectionError: If the device is currently updating
            ToolkitError: If one of the above-mentioned criteria is not
fulfilled
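        Example:
            Minimal sketch (assumes ``device`` is a connected toolkit
            device object):
            >>> device.check_compatibility()  # raises on any mismatch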
"""
self._check_python_versions(
self._version_string_to_tuple(zhinst_version_str),
self._version_string_to_tuple(utils_version_str),
)
labone_version_str = self._session.about.version()
labone_revision_str = str(self._session.about.revision())[4:]
labone_version = self._version_string_to_tuple(
labone_version_str + "." + labone_revision_str
)
self._check_labone_version(
self._version_string_to_tuple(zhinst_version_str), labone_version
)
self._check_firmware_update_status()
def get_streamingnodes(self) -> t.List[Node]:
"""Create a list with all streaming nodes available.
Returns:
            Available streaming nodes.
"""
if self._streaming_nodes is None:
self._streaming_nodes = []
for node, info in self:
if "Stream" in info.get("Properties"):
self._streaming_nodes.append(node)
return self._streaming_nodes
def _load_preloaded_json(self, filename: Path) -> t.Optional[dict]:
"""Load a preloaded json and match the existing nodes.
Args:
            filename: Filename of the preloaded json.
Returns:
Loaded JSON if the file exists.
"""
if not filename.is_file():
return None
        raw_file = filename.read_text()
raw_file = raw_file.replace("devxxxx", self.serial.lower())
raw_file = raw_file.replace("DEVXXXX", self.serial.upper())
json_raw = json.loads(raw_file)
existing_nodes = self._session.daq_server.listNodes(
f"/{self.serial}/*", recursive=True, leavesonly=True
)
preloaded_json = {}
for node in existing_nodes:
node_name = re.sub(r"(?<!values)\/[0-9]*?$", "/n", node.lower())
node_name = re.sub(r"\/[0-9]*?\/", "/n/", node_name)
json_element = copy.deepcopy(json_raw.get(node_name))
if json_element:
json_element["Node"] = node.upper()
preloaded_json[node.lower()] = json_element
elif not node.startswith("/zi/"):
logger.warning(f"unkown node {node}")
return preloaded_json
def set_transaction(self) -> t.ContextManager:
"""Context manager for a transactional set.
Can be used as a context in a with statement and bundles all node set
commands into a single transaction. This reduces the network overhead
and often increases the speed.
        Within the with block, set commands to a node will be buffered
and bundled into a single command at the end automatically.
(All other operations, e.g. getting the value of a node, will not be
affected)
Warning:
The set is always performed as deep set if called on device nodes.
Examples:
>>> with device.set_transaction():
device.test[0].a(1)
device.test[1].a(2)
"""
return self._root.set_transaction()
@property
def serial(self) -> str:
"""Instrument specific serial."""
return self._serial
@property
def session(self) -> "Session":
"""Underlying session to the data server.
.. versionadded:: 0.5.1
"""
return self._session
@property
def device_type(self) -> str:
"""Type of the instrument (e.g. MFLI)."""
return self._device_type
@lazy_property
def device_options(self) -> str:
"""Enabled options of the instrument."""
return self.features.options() | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/devices/base.py | base.py |
import logging
import time
from typing import List, Union
from zhinst.toolkit.driver.devices.base import BaseInstrument
from zhinst.toolkit.exceptions import ToolkitError
logger = logging.getLogger(__name__)
class PQSC(BaseInstrument):
"""High-level driver for the Zurich Instruments PQSC."""
def arm(self, *, deep=True, repetitions: int = None, holdoff: float = None) -> None:
"""Prepare PQSC for triggering the instruments.
This method configures the execution engine of the PQSC and
clears the register bank. Optionally, the *number of triggers*
and *hold-off time* can be set when specified as keyword
arguments. If they are not specified, they are not changed.
Note that the PQSC is disabled at the end of the hold-off time
after sending out the last trigger. Therefore, the hold-off time
should be long enough such that the PQSC is still enabled when
the feedback arrives. Otherwise, the feedback cannot be processed.
Args:
deep: A flag that specifies if a synchronization
should be performed between the device and the data
server after stopping the PQSC and clearing the
register bank (default: True).
repetitions: If specified, the number of triggers sent
over ZSync ports will be set (default: None).
holdoff: If specified, the time between repeated
triggers sent over ZSync ports will be set. It has a
minimum value and a granularity of 100 ns
(default: None).
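        Example:
            Illustrative sketch (assumes ``pqsc`` is a connected PQSC
            driver instance):
            >>> pqsc.arm(repetitions=10, holdoff=100e-9)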
"""
# Stop the PQSC if it is already running
self.stop(deep=deep)
if repetitions is not None:
self.execution.repetitions(repetitions)
if holdoff is not None:
self.execution.holdoff(holdoff)
# Clear register bank
self.feedback.registerbank.reset(1, deep=deep)
def run(self, *, deep: bool = True) -> None:
"""Start sending out triggers.
This method activates the trigger generation to trigger all
connected instruments over ZSync ports.
Args:
deep: A flag that specifies if a synchronization
should be performed between the device and the data
server after enabling the PQSC (default: True).
"""
self.execution.enable(True, deep=deep)
def arm_and_run(self, *, repetitions: int = None, holdoff: float = None) -> None:
"""Arm the PQSC and start sending out triggers.
Simply combines the methods arm and run. A synchronization
is performed between the device and the data server after
arming and running the PQSC.
Args:
repetitions: If specified, the number of triggers sent
over ZSync ports will be set (default: None).
holdoff: If specified, the time between repeated
triggers sent over ZSync ports will be set. It has a
minimum value and a granularity of 100 ns
(default: None).
"""
self.arm(deep=True, repetitions=repetitions, holdoff=holdoff)
self.run(deep=True)
def stop(self, *, deep: bool = True) -> None:
"""Stop the trigger generation.
Args:
deep: A flag that specifies if a synchronization
should be performed between the device and the data
server after disabling the PQSC (default: True).
"""
self.execution.enable(False, deep=deep)
def wait_done(self, *, timeout: float = 10.0, sleep_time: float = 0.005) -> None:
"""Wait until trigger generation and feedback processing is done.
Args:
timeout: The maximum waiting time in seconds for the
PQSC (default: 10.0).
sleep_time: Time in seconds to wait between
requesting PQSC state
Raises:
TimeoutError: If the PQSC is not done sending out all
triggers and processing feedback before the timeout.
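        Example:
            Typical usage after starting the trigger generation (sketch,
            assuming ``pqsc`` is a connected PQSC instance):
            >>> pqsc.arm_and_run(repetitions=10)
            >>> pqsc.wait_done(timeout=10.0)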
"""
try:
self.execution.enable.wait_for_state_change(
0, timeout=timeout, sleep_time=sleep_time
)
except TimeoutError as error:
raise TimeoutError("PQSC timed out.") from error
def check_ref_clock(
self, *, timeout: float = 30.0, sleep_time: float = 1.0
) -> bool:
"""Check if reference clock is locked successfully.
Args:
timeout: Maximum time in seconds the program waits
(default: 30.0).
sleep_time: Time in seconds to wait between
                requesting the reference clock status (default: 1.0).
        Returns:
            True if the reference clock is locked, False if locking failed
            and the device switched back to the internal clock.
Raises:
TimeoutError: If the process of locking to the reference clock
exceeds the specified timeout.
"""
ref_clock_status = self.system.clocks.referenceclock.in_.status
ref_clock = self.system.clocks.referenceclock.in_.source
ref_clock_actual = self.system.clocks.referenceclock.in_.sourceactual
try:
ref_clock_status.wait_for_state_change(
2, invert=True, timeout=timeout, sleep_time=sleep_time
)
except TimeoutError as error:
raise TimeoutError(
"Timeout during locking to reference clock signal"
) from error
if ref_clock_status() == 0:
return True
if ref_clock_status() == 1 and ref_clock_actual() != ref_clock():
ref_clock("internal", deep=True)
logger.error(
f"There was an error locking the device({self.serial}) "
f"onto reference clock signal. Automatically switching to internal "
f"reference clock. Please try again."
)
return False
def check_zsync_connection(
self,
inputs: Union[List[int], int, List[BaseInstrument], BaseInstrument],
*,
timeout: float = 10.0,
sleep_time: float = 0.1,
) -> Union[List[bool], bool]:
"""Check if a ZSync connection is established.
Checks the current status of the instrument connected to the given ports.
        If an instrument (or list of instruments) is given instead of a port
        number, the correct port number(s) are determined first.
Args:
inputs: The port numbers to check the ZSync connection for.
It can either be a single port number given as integer, a list
                of several port numbers, an instrument, or a list of instruments.
timeout: Maximum time in seconds the program waits (default: 10.0).
            sleep_time: Time in seconds to wait between requesting the
                connection status (default: 0.1)
.. versionchanged:: 0.6.1: Reduce default timeout and sleep_time.
.. versionchanged:: 0.6.1: Raise an error if the port is in a faulty
state, instead of return False.
        Returns:
            True for each specified port once its ZSync connection is
            established (a list if a list of inputs was given).
        Raises:
TimeoutError: If the process of establishing a ZSync connection on
one of the specified ports exceeds the specified timeout.
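        Example:
            Sketch, assuming ``pqsc`` and ``hdawg`` are connected toolkit
            devices linked via ZSync:
            >>> pqsc.check_zsync_connection(2)        # by port number
            >>> pqsc.check_zsync_connection([hdawg])  # by instrument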
"""
inputs_list = inputs if isinstance(inputs, list) else [inputs]
start_time = time.time()
# Check the status of all ports
status = []
        for zsync_input in inputs_list:
            # Convert the instrument into a port, if needed
            if isinstance(zsync_input, BaseInstrument):
                port = self.find_zsync_worker_port(
                    zsync_input,
                    timeout=max(0, timeout - (time.time() - start_time)),
                    sleep_time=sleep_time,
                )
            else:
                port = zsync_input
# Check or wait until the connection is ready
status.append(
self._check_zsync_connection(
port,
timeout=max(0, timeout - (time.time() - start_time)),
sleep_time=sleep_time,
)
)
return status if isinstance(inputs, list) else status[0]
def _check_zsync_connection(
self, port: int, timeout: float, sleep_time: float
) -> bool:
"""Check if the ZSync connection on the given port is successful.
This function checks the current status of the instrument
connected to the given port.
Args:
            port: Port number to check the ZSync connection for.
timeout: Maximum time in seconds the program waits.
sleep_time: Time in seconds to wait between requesting the status
Raises:
            TimeoutError: If the process of establishing a ZSync connection on
specified port exceeds the specified timeout.
"""
status_node = self.zsyncs[port].connection.status
try:
# Waits until the status node is "connected" (2)
status_node.wait_for_state_change(2, timeout=timeout, sleep_time=sleep_time)
except TimeoutError as error:
status = status_node()
err_msg = (
"Timeout while establishing ZSync connection to the instrument "
f"on the port {port}."
)
if status == 0:
# No connection
err_msg += "No instrument detected."
elif status == 1:
# In progress
err_msg += (
"Connection still in progress. Consider increasing the timeout."
)
elif status == 3:
# Error
err_msg += (
"Impossible to establish a connect. Check cabling and FW version"
)
raise TimeoutError(err_msg) from error
return True
def find_zsync_worker_port(
self,
device: BaseInstrument,
timeout: float = 10.0,
sleep_time: float = 0.1,
) -> int:
"""Find the ID of the PQSC ZSync port connected to a given device.
The function checks until the given timeout for the specified device to
show up in the connection list.
Args:
device: device for which the connected ZSync port shall be found.
timeout: Maximum time in seconds the program waits (default: 10.0).
sleep_time: Time in seconds to wait between requesting the port
serials list (default: 0.1)
.. versionchanged:: 0.6.1: Added timeout and sleep_time parameters.
Returns:
Index of the searched PQSC ZSync port.
Raises:
ToolkitError: If the given device doesn't appear to be connected
to the PQSC via ZSync.
.. versionadded:: 0.5.1
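        Example:
            Sketch, assuming ``pqsc`` and ``shfqa`` are connected toolkit
            devices linked via ZSync:
            >>> port = pqsc.find_zsync_worker_port(shfqa)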
"""
device_serial = device.serial[3:]
start = time.time()
while time.time() - start < timeout:
node_to_serial_dict = self.zsyncs["*"].connection.serial()
if device_serial in node_to_serial_dict.values():
break
time.sleep(sleep_time)
else:
raise ToolkitError(
"No ZSync connection found between the PQSC "
f"{self.serial} and the device {device.serial}."
)
# Get the node of the ZSync connected to the device
# (will have the form "/devXXXX/zsyncs/N/connection/serial")
serial_to_node_dict = {
serial: node for node, serial in node_to_serial_dict.items()
}
device_zsync_node = serial_to_node_dict[device_serial]
# Just interested in knowing N: split in
# ['', 'devXXXX', 'zsyncs', 'N', 'connection', 'serial']
# and take fourth value
return int(device_zsync_node.split("/")[3]) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/devices/pqsc.py | pqsc.py |
import typing as t
from zhinst.toolkit.driver.devices.base import BaseInstrument
from zhinst.toolkit.driver.nodes.awg import AWG
from zhinst.toolkit.nodetree.helper import (
create_or_append_set_transaction,
lazy_property,
)
from zhinst.toolkit.nodetree.node import NodeList
class HDAWG(BaseInstrument):
"""High-level driver for the Zurich Instruments HDAWG."""
def enable_qccs_mode(self) -> None:
"""Configure the instrument to work with PQSC.
This method sets the reference clock source to
connect the instrument to the PQSC.
Info:
Use ``factory_reset`` to reset the changes if necessary
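        Example:
            Sketch, assuming ``hdawg`` is a connected toolkit HDAWG:
            >>> hdawg.enable_qccs_mode()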
"""
with create_or_append_set_transaction(self._root):
# Set ZSync clock to be used as reference
self.system.clocks.referenceclock.source("zsync")
# Configure DIO
# Set interface standard to use on the 32-bit DIO to LVCMOS
self.dios[0].interface(0)
# Set DIO output values to ZSync input values.
# Forward the ZSync input values to the AWG sequencer.
# Forward the DIO input values to the ZSync output.
self.dios[0].mode("qccs")
# Drive the two most significant bytes of the DIO port
self.dios[0].drive(0b1100)
# Disable DIO triggering on the AWGs,
# since it's not needed for ZSync messages
self.awgs["*"].dio.strobe.slope("off")
self.awgs["*"].dio.valid.polarity("none")
@lazy_property
def awgs(self) -> t.Sequence[AWG]:
"""A Sequence of AWG Cores."""
return NodeList(
[
AWG(
self.root,
self._tree + ("awgs", str(i)),
self.serial,
i,
self.device_type,
self.device_options,
)
for i in range(len(self["awgs"]))
],
self._root,
self._tree + ("awgs",),
) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/devices/hdawg.py | hdawg.py |
import typing as t
import numpy as np
from zhinst.toolkit.driver.devices import UHFLI
from zhinst.toolkit.nodetree import Node, NodeTree
from zhinst.toolkit.nodetree.helper import (
create_or_append_set_transaction,
lazy_property,
)
from zhinst.toolkit.nodetree.node import NodeList
from zhinst.toolkit.waveform import Waveforms
Numpy2DArray = t.TypeVar("Numpy2DArray")
class Integration(Node):
"""Integration part for the UHFQA.
Args:
root: Underlying node tree.
tree: tree (node path as tuple) of the corresponding node.
.. versionadded:: 0.3.2
"""
def __init__(
self,
root: NodeTree,
tree: tuple,
):
super().__init__(root, tree)
def write_integration_weights(self, weights: t.Union[Waveforms, dict]) -> None:
"""Upload complex integration weights.
The weight functions are applied to the real and imaginary part of
the input signal. In the hardware the weights are implemented
as 17-bit integers.
Args:
weights: Dictionary containing the weight functions, where
keys correspond to the indices of the integration weights to be
configured.
Note:
            Does not raise an error when the sample limit is exceeded; only the
            maximum number of samples is applied. Please refer to the LabOne node
            documentation for the maximum number of integration weight samples.
Note:
This function calls both `/qas/n/integration/weights/n/real` and
`/qas/n/integration/weights/n/imag` nodes.
            If only the real or imaginary part is defined, the corresponding
            samples of the other part are zeroed.
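        Example:
            Illustrative sketch with hypothetical weight data (assumes
            ``device`` is a connected toolkit UHFQA):
            >>> import numpy as np
            >>> from zhinst.toolkit.waveform import Waveforms
            >>> weights = Waveforms()
            >>> weights[0] = np.exp(1j * np.linspace(0, np.pi, 4096))
            >>> device.qas[0].integration.write_integration_weights(weights)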
"""
waveform_dict = {}
if isinstance(weights, Waveforms):
for slot in weights.keys():
waveform_dict[slot] = weights.get_raw_vector(slot, complex_output=True)
else:
waveform_dict = weights
with create_or_append_set_transaction(self._root):
for key, waveform in waveform_dict.items():
self.weights[key].real(np.copy(waveform.real))
self.weights[key].imag(np.copy(waveform.imag))
class QAS(Node):
"""Quantum Analyzer Channel for the UHFQA.
Args:
root: Underlying node tree.
tree: tree (node path as tuple) of the corresponding node.
"""
def __init__(
self,
root: NodeTree,
tree: tuple,
):
super().__init__(root, tree)
def crosstalk_matrix(self, matrix: Numpy2DArray = None) -> t.Optional[Numpy2DArray]:
"""Sets or gets the crosstalk matrix of the UHFQA as a 2D array.
Args:
matrix: The 2D matrix used in the digital signal
processing path to compensate for crosstalk between the
different channels. The given matrix can also be a part
of the entire 10 x 10 matrix. Its maximum dimensions
are 10 x 10 (default: None).
Returns:
If no argument is given the method returns the current
crosstalk matrix as a 2D numpy array.
Raises:
ValueError: If the matrix size exceeds the maximum size of
10 x 10
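        Example:
            Sketch, assuming ``device`` is a connected toolkit UHFQA:
            >>> import numpy as np
            >>> device.qas[0].crosstalk_matrix(np.identity(10))  # set
            >>> matrix = device.qas[0].crosstalk_matrix()        # get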
"""
if matrix is None:
m = np.zeros((10, 10))
for r in range(10):
for c in range(10):
m[r, c] = self.crosstalk.rows[r].cols[c]()
return m
else:
rows, cols = matrix.shape # type: ignore[attr-defined]
if rows > 10 or cols > 10:
raise ValueError(
f"The shape of the given matrix is {rows} x {cols}. "
"The maximum size is 10 x 10."
)
for r in range(rows):
for c in range(cols):
self.crosstalk.rows[r].cols[c](matrix[r, c]) # type: ignore[index]
return None
def adjusted_delay(self, value: int = None) -> int:
"""Set or get the adjustment in the quantum analyzer delay.
Adjusts the delay that defines the time at which the integration starts
in relation to the trigger signal of the weighted integration units.
Depending if the deskew matrix is bypassed there exists a different
default delay. This function can be used to add an additional delay to
the default delay.
Args:
value: Number of additional samples to adjust the delay. If not
specified this function will just return the additional delay
currently set.
Returns:
The adjustment in delay in units of samples.
Raises:
ValueError: If the adjusted quantum analyzer delay is outside the
                allowed range of 0 to 1020 samples.
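        Example:
            Sketch, assuming ``device`` is a connected toolkit UHFQA:
            >>> device.qas[0].adjusted_delay(16)  # add 16 samples of delay
            16
            >>> device.qas[0].adjusted_delay()    # query the adjustment
            16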
"""
        # The default delay value is defined by whether the deskew matrix is
# bypassed or not.
default_delay = 184 if self.bypass.deskew() else 200
if value is None:
return self.delay() - default_delay
# Round down to greatest multiple of 4, as in LabOne.
qa_delay_user = int(value // 4) * 4
# Calculate final value of adjusted QA delay.
qa_delay_adjusted = qa_delay_user + default_delay
# Check if final delay is between 0 and 1020.
if qa_delay_adjusted not in range(0, 1021):
raise ValueError(
"The quantum analyzer delay is out of range (0 <= "
f"{qa_delay_user} + {default_delay} <= 1021)"
)
# Write the adjusted delay value to the node.
self.delay(qa_delay_adjusted)
return qa_delay_user
@lazy_property
def integration(self) -> Integration:
"""Integration.
.. versionadded:: 0.3.2
"""
return Integration(self.root, self._tree + ("integration",))
class UHFQA(UHFLI):
"""High-level driver for the Zurich Instruments UHFQA."""
def enable_qccs_mode(self) -> None:
"""Configure the instrument to work with PQSC.
This method sets the reference clock source and DIO settings
correctly to connect the instrument to the PQSC.
Info:
Use ``factory_reset`` to reset the changes if necessary
"""
with create_or_append_set_transaction(self._root):
# Use external 10 MHz clock as reference
self.system.extclk("external")
# Configure DIO to be used in a QCCS
# Clock DIO internally with a frequency of 50 MHz
self.dios[0].extclk("internal")
# Set DIO output values to QA results compatible with QCCS
self.dios[0].mode("qa_result_qccs")
# Drive the two least significant bytes of the DIO port
self.dios[0].drive(0b0011)
# Set correct DIO triggering in the AWG sequencer
self.awgs[0].dio.strobe.slope("off")
self.awgs[0].dio.valid.index(16)
self.awgs[0].dio.valid.polarity("high")
@lazy_property
def qas(self) -> t.Sequence[QAS]:
"""A Sequence of QAS."""
return NodeList(
[
QAS(self.root, self._tree + ("qas", str(i)))
for i in range(len(self["qas"]))
],
self._root,
self._tree + ("qas",),
) | zhinst-toolkit | /zhinst_toolkit-0.6.1-py3-none-any.whl/zhinst/toolkit/driver/devices/uhfqa.py | uhfqa.py |
import typing as t
import zhinst.core as zi
from zhinst.utils.device_status import DeviceStatusFlag, get_device_statuses
from zhinst.utils.exceptions import CompatibilityError
def check_dataserver_device_compatibility(daq: zi.ziDAQServer, serials: t.List[str]):
"""Check LabOne DataServer and device firmware compatibility.
Args:
daq: ziDAQServer
serials: Serials of the devices whose compatibility is checked.
Raises:
ConnectionError: If a device update is in progress.
CompatibilityError: If version compatibility issues are found.
The error message will show the actions needed per device.
.. versionadded:: 0.3
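    Example:
        Sketch, assuming a running Data Server on ``localhost`` and a
        connected device ``dev12004``:
        ```python
        import zhinst.core
        daq = zhinst.core.ziDAQServer("localhost", 8004, 6)
        check_dataserver_device_compatibility(daq, ["dev12004"])
        ```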
"""
statuses = get_device_statuses(daq, serials)
errors = []
for serial, flags in statuses.items():
if DeviceStatusFlag.FW_UPDATE_IN_PROGRESS in flags:
raise ConnectionError(
f"Device '{serial}' has update in progress. Wait for update to finish."
)
if DeviceStatusFlag.FW_UPGRADE_AVAILABLE in flags:
errors.append(
f"Device '{serial}' has firmware upgrade available."
"Please upgrade the device firmware."
)
if (
DeviceStatusFlag.FW_UPGRADE_REQUIRED in flags
or DeviceStatusFlag.FW_UPGRADE_USB in flags
):
errors.append(
f"Device '{serial}' requires firmware upgrade. "
"Please upgrade the device firmware."
)
if DeviceStatusFlag.FW_DOWNGRADE_AVAILABLE in flags:
errors.append(
f"Device '{serial}' has firmware downgrade available. "
"Please downgrade the device firmware or update LabOne."
)
if DeviceStatusFlag.FW_DOWNGRADE_REQUIRED in flags:
errors.append(
f"Device '{serial}' requires firmware downgrade. "
"Please downgrade the device firmware or update LabOne."
)
if errors:
raise CompatibilityError(
"LabOne and device firmware version compatibility issues were found.\n"
+ "\n".join(errors)
) | zhinst-utils | /zhinst_utils-0.3.3-py3-none-any.whl/zhinst/utils/api_compatibility.py | api_compatibility.py |
import typing as t
from zhinst.utils import convert_awg_waveform
from zhinst.core import ziDAQServer, compile_seqc
SHFSG_MAX_SIGNAL_GENERATOR_WAVEFORM_LENGTH = 98304
SHFSG_SAMPLING_FREQUENCY = 2e9
def load_sequencer_program(
daq: ziDAQServer,
device_id: str,
channel_index: int,
sequencer_program: str,
**_,
) -> None:
"""Compiles and loads a program to a specified AWG core.
This function is composed of 4 steps:
    1. Reset the AWG core to ensure a clean state.
2. Compile the sequencer program with the offline compiler.
3. Upload the compiled binary elf file.
    4. Validate that the upload was successful and the AWG core is ready
again.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFSG device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which sequencer to upload - there
is one sequencer per channel.
sequencer_program: Sequencer program to be uploaded.
Raises:
        RuntimeError: If the upload was not successful or the device could not
process the sequencer program.
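    Example:
        Illustrative sketch with a trivial sequencer program (assumes a
        running Data Server and a connected SHFSG ``dev12004``):
        ```python
        import zhinst.core
        daq = zhinst.core.ziDAQServer("localhost", 8004, 6)
        daq.connectDevice("dev12004", "1GbE")
        load_sequencer_program(daq, "dev12004", 0, "playZero(1024);")
        ```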
"""
# start by resetting the sequencer
daq.syncSetInt(f"/{device_id}/sgchannels/{channel_index}/awg/reset", 1)
device_type = daq.getString(f"/{device_id}/features/devtype")
device_options = daq.getString(f"/{device_id}/features/options")
elf, _ = compile_seqc(
sequencer_program, device_type, device_options, channel_index, sequencer="sg"
)
daq.setVector(f"/{device_id}/sgchannels/{channel_index}/awg/elf/data", elf)
if not daq.get(f"/{device_id}/sgchannels/{channel_index}/awg/ready"):
raise RuntimeError(
"The device did not not switch to into the ready state after the upload."
)
def enable_sequencer(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
single: t.Union[bool, int] = True,
) -> None:
"""Starts the sequencer of a specific channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFSG device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which sequencer to enable - there
is one sequencer per channel.
        single: Flag if the sequencer should run in single mode.
    Raises:
        RuntimeError: If the sequencer could not be enabled.
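    Example:
        Sketch, assuming ``daq`` is connected to an SHFSG ``dev12004``
        with a program already loaded on sequencer 0:
        ```python
        enable_sequencer(daq, "dev12004", 0, single=True)
        ```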
"""
sequencer_path = f"/{device_id}/sgchannels/{channel_index}/awg/"
daq.setInt(
sequencer_path + "single",
int(single),
)
if not daq.syncSetInt(sequencer_path + "enable", 1):
raise RuntimeError(
"The sequencer could not be enabled. Please ensure that the "
"sequencer program is loaded and configured correctly."
)
def upload_commandtable(
daq: ziDAQServer,
device_id: str,
channel_index: int,
command_table: str,
) -> None:
"""Uploads a command table in the form of a string to the appropriate channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFSG device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which channel to upload the command
table to.
command_table: The command table to be uploaded.
"""
# upload command table
daq.setVector(
f"/{device_id}/sgchannels/{channel_index}/awg/commandtable/data",
command_table,
)
def write_to_waveform_memory(
daq: ziDAQServer,
device_id: str,
channel_index: int,
waveforms: dict,
) -> None:
"""Writes waveforms to the waveform memory of a specified sequencer.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFSG device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which sequencer the waveforms below are
written to - there is one generator per channel.
        waveforms: Dictionary of waveforms; the key specifies the
waveform index to which to write the waveforms.
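    Example:
        Sketch with hypothetical waveform data (assumes ``daq`` is
        connected to an SHFSG ``dev12004``):
        ```python
        import numpy as np
        waveforms = {0: 0.5 * np.ones(1024)}  # hypothetical flat envelope
        write_to_waveform_memory(daq, "dev12004", 0, waveforms)
        ```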
"""
waveforms_path = f"/{device_id}/sgchannels/{channel_index}/awg/waveform/waves/"
settings = []
for slot, waveform in waveforms.items():
wave_raw = convert_awg_waveform(waveform)
settings.append((waveforms_path + f"{slot}", wave_raw))
daq.set(settings)
def configure_marker_and_trigger(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
trigger_in_source: str,
trigger_in_slope: str,
marker_out_source: str,
) -> None:
"""Configures the trigger inputs and marker outputs of a specified AWG core.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFSG device identifier, e.g. `dev12004` or 'shf-dev12004'
channel_index: Index of the used SG channel.
trigger_in_source: Alias for the trigger input used by the
sequencer. For a list of available values use:
daq.help(f"/{dev_id}/sgchannels/{channel_index}/awg/auxtriggers/0/channel")
trigger_in_slope: Alias for the slope of the input trigger used
by sequencer. For a list of available values use
daq.help(f"/{dev_id}/sgchannels/{channel_index}/awg/auxtriggers/0/slope")
marker_out_source: Alias for the marker output source used by the
sequencer. For a list of available values use
daq.help(f"/{dev_id}/sgchannels/{channel_index}/marker/source")
"""
# Trigger input
settings = []
settings.append(
(
f"/{device_id}/sgchannels/{channel_index}/awg/auxtriggers/0/channel",
trigger_in_source,
)
)
settings.append(
(
f"/{device_id}/sgchannels/{channel_index}/awg/auxtriggers/0/slope",
trigger_in_slope,
)
)
# Marker output
settings.append(
(
f"/{device_id}/sgchannels/{channel_index}/marker/source",
marker_out_source,
)
)
daq.set(settings)
def configure_channel(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
enable: int,
output_range: int,
center_frequency: float,
rflf_path: int,
) -> None:
"""Configures the RF input and output of a specified channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFSG device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index of the used SG channel.
enable: Whether or not to enable the channel.
        output_range: Maximal range of the signal output power in dBm.
center_frequency: Center Frequency before modulation.
rflf_path: Switch between RF and LF paths.
"""
path = f"/{device_id}/sgchannels/{channel_index}/"
settings = []
settings.append((path + "output/range", output_range))
settings.append((path + "output/rflfpath", rflf_path))
if rflf_path == 1:
synth = daq.getInt(path + "synthesizer")
settings.append(
(f"/{device_id}/synthesizers/{synth}/centerfreq", center_frequency)
)
elif rflf_path == 0:
settings.append((path + "digitalmixer/centerfreq", center_frequency))
settings.append((path + "output/on", enable))
daq.set(settings)
def configure_pulse_modulation(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
enable: int,
osc_index: int = 0,
osc_frequency: float = 100e6,
phase: float = 0.0,
global_amp: float = 0.5,
gains: tuple = (1.0, -1.0, 1.0, 1.0),
sine_generator_index: int = 0,
) -> None:
"""Configure the pulse modulation.
Configures the sine generator to digitally modulate the AWG output, for
generating single sideband AWG signals.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFSG device identifier, e.g. `dev12004` or 'shf-dev12004'
channel_index: Index of the used SG channel.
enable: Enables modulation.
osc_index: Selects which oscillator to use.
osc_frequency: Oscillator frequency used to modulate the AWG
outputs. (default = 100e6)
phase: Sets the oscillator phase. (default = 0.0)
global_amp: Global scale factor for the AWG outputs. (default = 0.5)
gains: Sets the four amplitudes used for single sideband
            generation. Default values correspond to the upper sideband with a
positive oscillator frequency. (default = (1.0, -1.0, 1.0, 1.0))
sine_generator_index: Selects which sine generator to use on a given
channel.
"""
path = f"/{device_id}/sgchannels/{channel_index}/"
settings = []
settings.append((path + f"sines/{sine_generator_index}/oscselect", osc_index))
settings.append((path + f"sines/{sine_generator_index}/phaseshift", phase))
settings.append((path + f"oscs/{osc_index}/freq", osc_frequency))
settings.append((path + "awg/modulation/enable", enable))
settings.append((path + "awg/outputamplitude", global_amp))
settings.append((path + "awg/outputs/0/gains/0", gains[0]))
settings.append((path + "awg/outputs/0/gains/1", gains[1]))
settings.append((path + "awg/outputs/1/gains/0", gains[2]))
settings.append((path + "awg/outputs/1/gains/1", gains[3]))
daq.set(settings)
def configure_sine_generation(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
enable: int,
osc_index: int = 0,
osc_frequency: float = 100e6,
phase: float = 0.0,
gains: tuple = (0.0, 1.0, 1.0, 0.0),
sine_generator_index: int = 0,
) -> None:
"""Configures the sine generator output of a specified channel.
Configures the sine generator output of a specified channel for generating
continuous wave signals without the AWG.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFSG device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index of the used SG channel.
enable: Enables the sine generator output.
osc_index: Selects which oscillator to use.
osc_frequency: Oscillator frequency used by the sine generator.
(default = 100e6)
phase: Sets the oscillator phase. (default = 0.0)
        gains: Sets the four amplitudes used for single sideband
            generation. Default values correspond to the upper sideband with a
            positive oscillator frequency. Gains are set in this order:
I/sin, I/cos, Q/sin, Q/cos
(default = (0.0, 1.0, 1.0, 0.0))
sine_generator_index: Selects which sine generator to use on a given
channel.
"""
path = f"/{device_id}/sgchannels/{channel_index}/sines/{sine_generator_index}/"
settings = []
settings.append((path + "i/enable", enable))
settings.append((path + "q/enable", enable))
settings.append((path + "i/sin/amplitude", gains[0]))
settings.append((path + "i/cos/amplitude", gains[1]))
settings.append((path + "q/sin/amplitude", gains[2]))
settings.append((path + "q/cos/amplitude", gains[3]))
settings.append((path + "oscselect", osc_index))
settings.append(
(
f"/{device_id}/sgchannels/{channel_index}/oscs/{osc_index}/freq",
osc_frequency,
)
)
settings.append((path + "phaseshift", phase))
daq.set(settings) | zhinst-utils | /zhinst_utils-0.3.3-py3-none-any.whl/zhinst/utils/shfsg.py | shfsg.py |
from dataclasses import dataclass
from enum import IntEnum
from typing import List, Tuple
import numpy as np
class SGType(IntEnum):
"""Different signal generator types used in a QCCS setup."""
HDAWG = 1
SHFSG = 2
SHFQC = 3
class QAType(IntEnum):
"""Different quantum analyzer types used in a QCCS setup."""
SHFQA = 1
SHFQC = 2
class PQSCMode(IntEnum):
"""Different handling of feedback data from the PQSC."""
REGISTER_FORWARD = 1
DECODER = 2
class FeedbackPath(IntEnum):
"""Different handling of feedback data from the PQSC.
.. versionadded:: 0.3
"""
ZSYNC = 1
INTERNAL = 3
@dataclass
class QCCSSystemDescription:
"""Describe the behavior of a QCCS system with respect to feedback latency."""
initial_latency_smpl: int
"""[samples] Minimum latency for the smallest amount of
integration samples. Always a multiple of 4."""
initial_steps: int
"""[steps] Integration length increment until the
first latency increment."""
pattern: List[Tuple[int, int]]
"""[(clock cycles, steps),...] The pattern of periodic
latency increments with respect to integration sample increments """
period_steps: int = 50
"""[steps] Period of the latency increment pattern."""
latency_in_period_step: int = 25
"""[clock cycles] Latency increment for a full period."""
rtlogger_correction: int = 0
"""[clock_cycles] Correction needed on top of the RTLogger recorded
latency to get the latency seen by the sequencer"""
def get_feedback_system_description(
generator_type: SGType,
analyzer_type: QAType,
pqsc_mode: PQSCMode = None,
feedback_path: FeedbackPath = FeedbackPath.ZSYNC,
) -> QCCSSystemDescription:
"""Returns a QCCSSysDescription object for a given configuration.
Args:
generator_type: Signal generator used (SHFSG/HDAWG).
analyzer_type: Quantum analyzer used.
pqsc_mode: Mode of operation for the PQSC.
feedback_path: Used only when the generator type is SHFQC to select
between local feedback or through PQSC
Returns:
A QCCS system description object to be used in a `QCCSFeedbackModel` object.
Raises:
ValueError: Incorrect values for 'generator_type', 'analyzer_type',
'pqsc_mode' or 'feedback_path'.
.. versionchanged:: 0.3
Added `feedback_path` argument.
"""
if analyzer_type not in [QAType.SHFQA, QAType.SHFQC]:
raise ValueError(f"Unknown quantum analyzer type ({analyzer_type})")
if (
pqsc_mode in [PQSCMode.DECODER, PQSCMode.REGISTER_FORWARD]
and feedback_path is FeedbackPath.INTERNAL
):
        raise ValueError(
            f"PQSC mode ({pqsc_mode}) incompatible "
            f"with selected feedback path ({feedback_path})"
        )
if generator_type is SGType.HDAWG:
if feedback_path == FeedbackPath.INTERNAL:
raise ValueError(
"Internal Feedback can only be used with generator=SGType.SHFQC"
)
if pqsc_mode is PQSCMode.REGISTER_FORWARD:
return QCCSSystemDescription(
initial_latency_smpl=96,
initial_steps=5,
pattern=[(4, 9), (4, 8), (4, 8), (4, 9), (5, 8), (4, 8)],
rtlogger_correction=2,
)
if pqsc_mode is PQSCMode.DECODER:
return QCCSSystemDescription(
initial_latency_smpl=100,
initial_steps=7,
pattern=[(4, 8), (4, 9), (4, 8), (5, 8), (4, 9), (4, 8)],
rtlogger_correction=2,
)
raise ValueError(f"Unknown PQSC mode ({pqsc_mode})")
if generator_type in [SGType.SHFSG, SGType.SHFQC]:
if feedback_path is FeedbackPath.INTERNAL:
if generator_type != SGType.SHFQC:
raise ValueError(
"Internal Feedback can only be used with generator=SGType.SHFQC"
)
if analyzer_type != QAType.SHFQC:
raise ValueError(
"Internal Feedback can only be used with analyzer=QAType.SHFQC"
)
if pqsc_mode is not None:
                raise ValueError(
                    "Internal Feedback can't be used with "
                    f"the selected PQSC mode ({pqsc_mode})"
                )
return QCCSSystemDescription(
initial_latency_smpl=23,
initial_steps=1,
pattern=[(1, 2)] * 25,
)
if pqsc_mode is PQSCMode.REGISTER_FORWARD:
return QCCSSystemDescription(
initial_latency_smpl=91,
initial_steps=5,
pattern=[(3, 9), (5, 8), (5, 8), (2, 9), (5, 8), (5, 8)],
rtlogger_correction=2,
)
if pqsc_mode is PQSCMode.DECODER:
return QCCSSystemDescription(
initial_latency_smpl=94,
initial_steps=7,
pattern=[(5, 8), (5, 9), (2, 8), (5, 8), (5, 9), (3, 8)],
rtlogger_correction=2,
)
raise ValueError(f"Unknown PQSC mode ({pqsc_mode})")
raise ValueError(f"Unknown signal generator type ({generator_type})")
@dataclass
class QCCSFeedbackModel:
"""A model that calculates the latency of feedback data.
Estimates are provided for the selected Signal Generator.
The 'start trigger' from the PQSC is used as starting point for
the latency estimate.
Attributes:
description: The QCCS system configuration description as returned
from get_feedback_system_description()
"""
description: QCCSSystemDescription
def get_latency(self, length: int) -> int:
"""Provide the expected latency relative to the integration length.
Args:
length: Integration length in samples
Returns:
The expected latency in AWG clock cycles
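        Example:
            Sketch for an SHFSG receiving feedback via PQSC register
            forwarding:
            >>> model = QCCSFeedbackModel(
            ...     description=get_feedback_system_description(
            ...         generator_type=SGType.SHFSG,
            ...         analyzer_type=QAType.SHFQA,
            ...         pqsc_mode=PQSCMode.REGISTER_FORWARD,
            ...     )
            ... )
            >>> latency_cycles = model.get_latency(length=256)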
"""
# before the periodic pattern
model = np.array(
[self.description.initial_latency_smpl] * self.description.initial_steps,
dtype=np.int64,
)
# build the periodic pattern
periodic_mdl = np.array([], dtype=np.int64)
acc = 0
for lat_inc, int_steps in self.description.pattern:
acc += lat_inc
periodic_mdl = np.concatenate(
(periodic_mdl, np.array([acc] * int_steps, dtype=np.int64)),
dtype=np.int64,
)
# from integration samples to generator cc
def f_calculate_cycles():
index = length // 4
if index <= self.description.initial_steps:
return model[index - 1]
index -= self.description.initial_steps + 1
lat_full_periods = (
index // self.description.period_steps
) * self.description.latency_in_period_step # latency from full periods
index = (
index % self.description.period_steps
) # remainder within the periodic pattern
# total latency
return int(
self.description.initial_latency_smpl
+ periodic_mdl[index]
+ lat_full_periods
)
latency_clk = f_calculate_cycles()
return latency_clk + self.description.rtlogger_correction | zhinst-utils | /zhinst_utils-0.3.3-py3-none-any.whl/zhinst/utils/feedback_model.py | feedback_model.py |
import os
import re
import time
import warnings
import socket
import typing as t
from pathlib import Path
import datetime
try:
# load_labone_mat() requires scipy.io.loadmat()
import scipy.io
except ImportError as e:
# No fallback. No complaints upon importing zhinst.utils, handle/raise
# exception when the function load_labone_mat() is called.
__SCIPY_IMPORT_ERROR = e
import numpy as np
import zhinst.core as zi
def create_api_session(
device_serial: str,
api_level: int,
server_host: str = None,
server_port: int = 8004,
*,
required_devtype: str = None,
required_options: str = None,
required_err_msg: str = None,
) -> t.Tuple[zi.ziDAQServer, str, t.Dict]:
"""Create an API session for the specified device.
Args:
device_serial: A string specifying the device serial number. For
example, 'uhf-dev2123' or 'dev2123'.
api_level: The targeted API level used by the code where the returned API
session will be used. The maximum API level you may use is defined by the
device class. HF2 only supports API level 1 and other devices support
API level 6. You should try to use the maximum level possible to enable
extended API features.
        server_host: A hostname or IP address. The server host can be omitted
if the targeted device is an MF* device or a local data server is running.
In this case it will try to connect to the local data server or device
internal data server (local server has priority).
server_port: The port number of the data server. The default port is 8004.
required_devtype: Deprecated: This option will be ignored.
required_options: Deprecated: This option will be ignored.
required_err_msg: Deprecated: This option will be ignored.
Returns:
daq: An instance of the core.ziDAQServer class
(representing an API session connected to a Data Server).
device: The device's ID, this is the string that specifies the
device's node branch in the data server's node tree.
props: The device's discovery properties as returned by the
ziDiscovery get() method.
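    Example:
        Sketch, assuming a device ``dev2345`` reachable via a Data Server
        on ``localhost``:
        ```python
        import zhinst.utils
        (daq, device, props) = zhinst.utils.create_api_session(
            "dev2345", 6, server_host="localhost"
        )
        ```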
"""
if required_devtype is not None:
raise DeprecationWarning(
"required_devtype is not supported anymore and will be removed in "
"the future."
)
if required_options is not None:
raise DeprecationWarning(
"required_options is not supported anymore and will be removed in "
"the future."
)
if required_err_msg is not None:
raise DeprecationWarning(
"required_error_msg is not supported anymore and will be removed "
"in the future."
)
class SessionInfo:
"""Information about the Session."""
device_serial = None
data_server = None
interfaces = None
api_level = None
daq = None
if not device_serial.startswith("dev"):
# Assume it has a prefix (e.g. 'mf-', 'uhf-') and strip that away
prefix_end = device_serial.find("-")
if prefix_end != -1:
device_serial = device_serial[prefix_end + 1 :] # noqa: E203
else:
raise RuntimeError(
"Device serial is invalid. It should be of the form: "
"dev3225 or uhf-dev2123."
)
session_info = SessionInfo()
session_info.device_serial = device_serial
session_info.api_level = api_level
discovery = zi.ziDiscovery()
device_id = discovery.find(session_info.device_serial).lower()
discovery_info = discovery.get(device_id)
if server_host is None:
if discovery_info["serveraddress"] != "127.0.0.1" and not discovery_info[
"devicetype"
].upper().startswith("MF"):
raise DeprecationWarning(
"Please provide a server address for a data server."
)
if not discovery_info["discoverable"]:
raise RuntimeError(
"The specified device {} is not discoverable from the API."
"Please ensure the device is powered-on and visible using the "
"LabOne User Interface.".format(session_info.device_serial)
)
# Since it's an MF device the discovery should return its own data server as
# server address or it's the local data server
session_info.data_server = (
discovery_info["serveraddress"],
discovery_info["serverport"],
)
else:
session_info.data_server = (socket.gethostbyname(server_host), server_port)
session_info.interfaces = discovery_info["interfaces"]
if not discovery_info["available"]:
if (
discovery_info["serveraddress"] != session_info.data_server[0]
and discovery_info["owner"].upper() != "PCIE"
):
error_message = "Device {} is not available: ".format(
session_info.device_serial
)
if discovery_info["status"].startswith("In use"):
error_message += "In use by {}".format(discovery_info["owner"])
else:
error_message += discovery_info["status"]
raise RuntimeError(error_message)
try:
session_info.daq = zi.ziDAQServer(
session_info.data_server[0],
session_info.data_server[1],
session_info.api_level,
)
except RuntimeError as error:
raise RuntimeError(
"Failed to connect to the data server {}:"
"{}".format(session_info.data_server[0], session_info.data_server[1])
) from error
connected = False
for interface in session_info.interfaces:
try:
print(
"Trying to connect to {} on interface {}".format(
session_info.device_serial, interface
)
)
session_info.daq.connectDevice(session_info.device_serial, interface)
connected = True
print(
"Connected to {} via data server "
"{}:{} and interface {}".format(
session_info.device_serial,
session_info.data_server[0],
session_info.data_server[1],
interface,
)
)
break
except Exception:
continue
if not connected:
raise RuntimeError(
"Failed to connect device {} to "
"data server {}. Make sure the "
"device is connected and turned on".format(
session_info.device_serial, session_info.data_server
)
)
return (session_info.daq, session_info.device_serial, discovery_info)
def api_server_version_check(daq: zi.ziDAQServer) -> bool:
"""Check the consistency of the used version in the LabOne stack.
Issue a warning and return False if the release version of the API used in
the session (daq) does not have the same release version as the Data Server
(that the API is connected to). If the versions match return True.
Args:
daq (ziDAQServer): An instance of the core.ziDAQServer class
(representing an API session connected to a Data Server).
Returns:
        Flag if the versions of the API and Data Server match.
Raises:
        Warning: If the versions of the API and Data Server do not match.
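    Example:
        Sketch, assuming a running Data Server reachable via autoConnect:
        ```python
        import zhinst.utils
        daq = zhinst.utils.autoConnect()
        zhinst.utils.api_server_version_check(daq)
        ```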
"""
api_version = daq.version()
api_revision = daq.revision()
server_version = daq.getString("/zi/about/version")
server_revision = daq.getInt("/zi/about/revision")
if api_version != server_version:
message = (
"There is a mismatch between the versions of the API and Data Server. "
"The API reports version `{}' (revision: {}) whilst the Data Server has "
"version `{}' (revision {}). See the ``Compatibility'' Section in the "
"LabOne Programming Manual for more information.".format(
api_version, api_revision, server_version, server_revision
)
)
warnings.warn(message)
return False
return True
def default_output_mixer_channel(
discovery_props: t.Dict, output_channel: int = 0
) -> int:
"""Return an instrument's default output mixer channel.
Based on the specified `devicetype` and `options` discovery properties and
the hardware output channel.
This utility function is used by the core examples and returns a node
available under the /devX/sigouts/0/{amplitudes,enables}/ branches.
Args:
discovery_props: A device's discovery properties as returned by
ziDiscovery's get() method.
output_channel: The zero-based index of the hardware
output channel for which to return an output mixer channel.
Returns:
The zero-based index of an available signal output mixer channel.
Raises:
Exception: If an invalid signal input index was provided.
"""
# The logic below assumes the device type is one of the following.
assert discovery_props["devicetype"] in [
"HF2IS",
"HF2LI",
"UHFLI",
"UHFAWG",
"UHFQA",
"MFIA",
"MFLI",
], "Unknown device type: {}.".format(discovery_props["devicetype"])
if re.match(r"UHF(LI|AWG)", discovery_props["devicetype"]) and (
"MF" not in discovery_props["options"]
):
if output_channel == 0:
return 3
if output_channel == 1:
return 7
raise Exception(
"Invalid output channel `{}`, UHF Instruments have two signal "
"ouput channels (0, 1).".format(output_channel)
)
if re.match(r"UHFQA", discovery_props["devicetype"]):
if output_channel == 0:
return 0
if output_channel == 1:
return 1
raise Exception(
"Invalid output channel `{}`, UHF Instruments have two signal "
"ouput channels (0, 1).".format(output_channel)
)
if re.match(r"HF2LI", discovery_props["devicetype"]) and (
"MF" not in discovery_props["options"]
):
if output_channel == 0:
return 6
if output_channel == 1:
return 7
raise Exception(
"Invalid output channel `{}`, HF2 Instruments have two signal output"
"channels (0, 1).".format(output_channel)
)
if re.match(r"(MFLI|MFIA)", discovery_props["devicetype"]) and (
"MD" not in discovery_props["options"]
):
if output_channel == 0:
return 1
raise Exception(
f"Invalid output channel `{output_channel}`, MF Instruments have one "
"signal output channel (0)."
)
return 0 if output_channel == 0 else 1
def autoDetect(daq: zi.ziDAQServer, exclude: t.List[str] = None) -> str:
"""Return one of the devices connected to the Data Server.
Return a string containing the first device ID (not in the exclude list)
that is attached to the Data Server connected via daq, an instance of the
core.ziDAQServer class.
Args:
daq: An instance of the core.ziDAQServer class
(representing an API session connected to a Data Server).
exclude: A list of strings specifying devices to
exclude. autoDetect() will not return the name of a device in this
list.
Returns:
Device ID of a device connected to the Data Server not in exclude.
Raises:
RuntimeError: If no device was found.
RuntimeError: If daq is not an instance of core.ziDAQServer.
Example:
```python
import zhinst.utils
daq = zhinst.utils.autoConnect()
device = zhinst.utils.autoDetect(daq)
```
"""
if not isinstance(daq, zi.ziDAQServer):
raise RuntimeError("First argument must be an instance of core.ziDAQServer")
nodes = daq.listNodes("/", 0)
    devs = [node for node in nodes if re.match(r"dev.*", node, re.IGNORECASE)]
if exclude is None:
exclude = []
if not isinstance(exclude, list):
exclude = [exclude]
exclude = [x.lower() for x in exclude]
devs = [dev for dev in devs if dev.lower() not in exclude]
if not devs:
raise RuntimeError(
"No Device found. Make sure that the device is connected to the host via "
"USB or Ethernet and that it is switched on. It may also be necessary to "
"issue a connectDevice command."
)
# Found at least one device -> selection valid.
# Select the first one
device = devs[0].lower()
print("autoDetect selected the device", device, "for the measurement.")
return device
def devices(daq: zi.ziDAQServer) -> t.List[str]:
"""List of device_id of all devices connected to the Data Server.
Return a list of strings containing the device IDs that are attached to the
Data Server connected via daq, an instance of the core.ziDAQServer
class. Returns an empty list if no devices are found.
Args:
daq: An instance of the core.ziDAQServer class (representing an API
session connected to a Data Server).
Returns:
A list of strings of connected device IDs. The list is empty if no devices
are detected.
Raises:
RuntimeError: If daq is not an instance of core.ziDAQServer.
Example:
```python
import zhinst.utils
daq = zhinst.utils.autoConnect() # autoConnect not supported for MFLI devices
    device_ids = zhinst.utils.devices(daq)
```
"""
if not isinstance(daq, zi.ziDAQServer):
raise RuntimeError("First argument must be an instance of core.ziDAQServer")
nodes = daq.listNodes("/", 0)
    devs = [node for node in nodes if re.match(r"dev.*", node, re.IGNORECASE)]
return list(x.lower() for x in list(devs))
def autoConnect(default_port: int = None, api_level: int = None) -> zi.ziDAQServer:
"""Try to connect to a Zurich Instruments Data Server (UHF,HF2 only).
Important: autoConnect() does not support MFLI devices.
Args:
default_port: The default port to use when connecting to
the Data Server (specify 8005 for the HF2 Data Server and 8004 for the
UHF Data Server)
            If default_port is not specified (=None) then first try to connect to an
HF2, if no server devices are found then try to connect to an UHF.
This behaviour is useful for the API examples. If we cannot connect to
            a server and/or detect a connected device, a RuntimeError is raised.
(default=None).
api_level: The API level to use, either 1, 4 or 5. HF2 only supports
Level 1, Level 5 is recommended for UHF and MFLI devices (default=None).
Returns:
ziDAQServer: An instance of the core.ziDAQServer class that is used
for communication to the Data Server.
Raises:
RuntimeError: If no running Data Server is found or no device is found
            that is attached to a Data Server.
"""
if default_port is None:
default_port = 8005
secondary_port = 8004
elif default_port in [8004, 8005]:
# If a port is specified, then don't try to connect to a secondary port
secondary_port = None
else:
raise RuntimeError(
f"autoConnect(): input argument default_port ({default_port}) must be "
"either 8004 or 8005."
)
if api_level is None:
# Note: level 1 used by default for both UHF and HF2, otherwise
# backwards compatibility not maintained.
api_level = 1
port_device = {8005: "HF2", 8004: "UHFLI or MFLI"}
port_valid_api_levels = {8005: [1], 8004: [1, 4, 5, 6]}
port_exception = {}
try:
assert api_level in port_valid_api_levels[default_port], (
"Invalid API level (`{}`) specified for port {} ({} devices), valid "
"API Levels: {}."
).format(
api_level,
default_port,
port_device[default_port],
port_valid_api_levels[default_port],
)
daq = zi.ziDAQServer("localhost", default_port, api_level)
devs = devices(daq)
assert devs, (
"Successfully connected to the server on port `{}`, API level `{}` but "
"devices() returned an empty list: No devices are connected to this PC."
).format(default_port, api_level)
# We have a server running and a device, we're done
print(
"autoConnect connected to a server on port",
default_port,
"using API level",
api_level,
".",
)
return daq
except (RuntimeError, AssertionError) as e:
port_exception[default_port] = e
error_msg_no_dev = str(
"Please ensure that the correct Zurich Instruments server is running for your "
"device and that your device is connected to the server (try connecting first "
"via the User Interface)."
)
# If default_port is specified as an input argument, then secondary_port is
# None. If we got here we had no success on default_port: raise an error.
if secondary_port is None:
error_msg = (
"autoConnect(): failed to connect to a running server or failed to find a "
"device connected to the server on port {} (used for {} devices). {} The "
"exception was: {}"
).format(
default_port,
port_device[default_port],
error_msg_no_dev,
port_exception[default_port],
)
raise RuntimeError(error_msg)
try:
assert api_level in port_valid_api_levels[secondary_port], (
"Invalid API level specified for port {} ({} devices), valid API "
"Levels: {}."
).format(
secondary_port,
port_device[secondary_port],
port_valid_api_levels[secondary_port],
)
daq = zi.ziDAQServer("localhost", secondary_port, api_level)
devs = devices(daq)
assert devs, (
"Successfully connected to the server on port `{}`, API level `{}` but "
"devices() returned an empty list: No devices are connected to this PC."
).format(secondary_port, api_level)
# We have a server running and a device, we're done
print(
"autoConnect connected to a server on port",
            secondary_port,
"using API level",
api_level,
".",
)
return daq
except (RuntimeError, AssertionError) as e:
port_exception[secondary_port] = e
# If we got here we failed to connect to a device. Raise a RuntimeError.
error_msg = (
"autoConnect(): failed to connect to a running server or failed to find a "
"device connected to the server. {} The exception on port {} (used for {} "
"devices) was: {} The exception on port {} (used for {} devices) was: {}"
).format(
error_msg_no_dev,
default_port,
port_device[default_port],
port_exception[default_port],
secondary_port,
port_device[secondary_port],
port_exception[secondary_port],
)
raise RuntimeError(error_msg)
def sigin_autorange(daq: zi.ziDAQServer, device: str, in_channel: int) -> float:
"""Perform an automatic adjustment of the signal input range.
Based on the measured input signal. This utility function starts the
functionality implemented in the device's firmware and waits until it has
completed. The range is set by the firmware based on the measured input
signal's amplitude measured over approximately 100 ms.
Requirements:
A devtype that supports autorange functionality on the firmware level,
e.g., UHFLI, MFLI, MFIA.
Args:
daq: A core API session.
device: The device ID on which to perform the signal input autorange.
in_channel: The index of the signal input channel to autorange.
Returns:
Signal input range.
Raises:
AssertionError: If the functionality is not supported by the device or an
invalid in_channel was specified.
RuntimeError: If autorange functionality does not complete within the
timeout.
Example:
```python
import zhinst.utils
device_serial = 'dev2006'
(daq, _, _) = zhinst.utils.create_api_session(device_serial, 5)
input_channel = 0
zhinst.utils.sigin_autorange(daq, device_serial, input_channel)
```
"""
autorange_path = "/{}/sigins/{}/autorange".format(device, in_channel)
assert any(
re.match(autorange_path, node, re.IGNORECASE)
for node in daq.listNodes(autorange_path, 7)
), (
"The signal input autorange node `{}` was not returned by listNodes(). Please "
"check that: The device supports autorange functionality (HF2 does not), the "
"device `{}` is connected to the Data Server and that the specified input "
"channel `{}` is correct."
).format(
autorange_path, device, in_channel
)
daq.setInt(autorange_path, 1)
daq.sync() # Ensure the value has taken effect on device before continuing
# The node /device/sigins/in_channel/autorange has the value of 1 until an
# appropriate range has been configured by the device, wait until the
# autorange routing on the device has finished.
t0 = time.time()
timeout = 30
while daq.getInt(autorange_path):
time.sleep(0.010)
if time.time() - t0 > timeout:
raise RuntimeError(
"Signal input autorange failed to complete after after %.f seconds."
% timeout
)
return daq.getDouble("/{}/sigins/{}/range".format(device, in_channel))
def get_default_settings_path(daq: zi.ziDAQServer) -> str:
"""Return the default path used for settings by the ziDeviceSettings module.
Args:
daq: A core API session.
Returns:
settings_path: The default ziDeviceSettings path.
"""
device_settings = daq.deviceSettings()
settings_path = device_settings.get("path")["path"][0]
device_settings.clear()
return settings_path
def load_settings(daq: zi.ziDAQServer, device: str, filename: str) -> None:
"""Load a LabOne settings file to the specified device.
This function is synchronous; it will block until loading the settings has
finished.
Args:
daq: A core API session.
device: The device ID specifying where to load the settings, e.g., 'dev123'.
filename: The filename of the xml settings file to load. The filename can
include a relative or full path.
Raises:
RuntimeError: If loading the settings times out.
Examples:
```python
import zhinst.utils as utils
daq = utils.autoConnect()
dev = utils.autoDetect(daq)
# Then, e.g., load settings from a file in the current directory:
utils.load_settings(daq, dev, 'my_settings.xml')
# Then, e.g., load settings from the default LabOne settings path:
filename = 'default_ui.xml'
path = utils.get_default_settings_path(daq)
utils.load_settings(daq, dev, path + os.sep + filename)
```
"""
path, filename = os.path.split(filename)
filename_noext = os.path.splitext(filename)[0]
device_settings = daq.deviceSettings()
device_settings.set("device", device)
device_settings.set("filename", filename_noext)
if path:
device_settings.set("path", path)
else:
device_settings.set("path", "." + os.sep)
device_settings.set("command", "load")
try:
device_settings.execute()
t0 = time.time()
timeout = 60
while not device_settings.finished():
time.sleep(0.05)
if time.time() - t0 > timeout:
raise RuntimeError(
"Unable to load device settings after %.f seconds." % timeout
)
finally:
device_settings.clear()
def save_settings(daq: zi.ziDAQServer, device: str, filename: str) -> None:
"""Save settings from the specified device to a LabOne settings file.
This function is synchronous; it will block until saving the settings has
finished.
Args:
daq: A core API session.
        device: The device ID whose settings should be saved, e.g., 'dev123'.
filename: The filename of the LabOne xml settings file. The filename
can include a relative or full path.
Raises:
RuntimeError: If saving the settings times out.
Examples:
```python
import zhinst.utils as utils
daq = utils.autoConnect()
dev = utils.autoDetect(daq)
# Then, e.g., save settings to a file in the current directory:
utils.save_settings(daq, dev, 'my_settings.xml')
# Then, e.g., save settings to the default LabOne settings path:
filename = 'my_settings_example.xml'
path = utils.get_default_settings_path(daq)
utils.save_settings(daq, dev, path + os.sep + filename)
```
"""
path, filename = os.path.split(filename)
filename_noext = os.path.splitext(filename)[0]
device_settings = daq.deviceSettings()
device_settings.set("device", device)
device_settings.set("filename", filename_noext)
if path:
device_settings.set("path", path)
else:
device_settings.set("path", "." + os.sep)
device_settings.set("command", "save")
try:
device_settings.execute()
t0 = time.time()
timeout = 60
while not device_settings.finished():
time.sleep(0.05)
if time.time() - t0 > timeout:
raise RuntimeError(
"Unable to save device settings after %.f seconds." % timeout
)
finally:
device_settings.clear()
# The names correspond to the data in the columns of a CSV file saved by the
# LabOne User Interface. These are the names of demodulator sample fields.
LABONE_DEMOD_NAMES = (
"chunk",
"timestamp",
"x",
"y",
"freq",
"phase",
"dio",
"trigger",
"auxin0",
"auxin1",
)
LABONE_DEMOD_FORMATS = ("u8", "u8", "f8", "f8", "f8", "f8", "u4", "u4", "f8", "f8")
# The dtype to provide when creating a numpy array from LabOne demodulator data
LABONE_DEMOD_DTYPE = list(zip(LABONE_DEMOD_NAMES, LABONE_DEMOD_FORMATS))
# The names correspond to the data in the columns of a CSV file saved by the
# ziControl User Interface. These are the names of demodulator sample fields.
ZICONTROL_NAMES = ("t", "x", "y", "freq", "dio", "auxin0", "auxin1")
ZICONTROL_FORMATS = ("f8", "f8", "f8", "f8", "u4", "f8", "f8")
# The dtype to provide when creating a numpy array from ziControl-saved demodulator data
ZICONTROL_DTYPE = list(zip(ZICONTROL_NAMES, ZICONTROL_FORMATS))
def load_labone_demod_csv(
fname: t.Union[str, Path], column_names: t.List[str] = LABONE_DEMOD_NAMES
) -> np.ndarray:
"""Load a CSV file containing demodulator samples.
Load a CSV file containing demodulator samples as saved by the LabOne User
Interface into a numpy structured array.
Args:
fname: The file or filename of the CSV file to load.
column_names: A list (or tuple) of column names to load from the CSV
file. Default is to load all columns.
Returns:
sample: A numpy structured array of shape (num_points,)
whose field names correspond to the column names in the first line of the
CSV file. num_points is the number of lines in the CSV file - 1.
Example:
```python
import zhinst.utils
sample = zhinst.utils.load_labone_demod_csv(
'dev2004_demods_0_sample_00000.csv',
('timestamp', 'x', 'y'))
import matplotlib.pyplot as plt
import numpy as np
plt.plot(sample['timestamp'], np.abs(sample['x'] + 1j*sample['y']))
```
"""
assert set(column_names).issubset(
LABONE_DEMOD_NAMES
), "Invalid name in ``column_names``, valid names are: %s" % str(LABONE_DEMOD_NAMES)
cols = [
col for col, dtype in enumerate(LABONE_DEMOD_DTYPE) if dtype[0] in column_names
]
dtype = [dt for dt in LABONE_DEMOD_DTYPE if dt[0] in column_names]
sample = np.genfromtxt(
fname, delimiter=";", dtype=dtype, usecols=cols, skip_header=1
)
return sample
def load_labone_csv(fname: str) -> np.ndarray:
"""Load a csv file generated from LabOne.
Load a CSV file containing generic data as saved by the LabOne User
Interface into a numpy structured array.
Args:
fname: The filename of the CSV file to load.
Returns:
A numpy structured array of shape (num_points,) whose field names
correspond to the column names in the first line of the CSV file.
num_points is the number of lines in the CSV file - 1.
Example:
```python
import zhinst.utils
# Load the CSV file of PID error data (node: /dev2004/pids/0/error)
data = zhinst.utils.load_labone_csv('dev2004_pids_0_error_00000.csv')
import matplotlib.pyplot as plt
# Plot the error
plt.plot(data['timestamp'], data['value'])
```
"""
data = np.genfromtxt(fname, delimiter=";", dtype=None, names=True)
return data
def load_labone_mat(filename: str) -> t.Dict:
"""Load a mat file generated from LabOne.
A wrapper function for loading a MAT file as saved by the LabOne User
Interface with scipy.io's loadmat() function. This function is included
    mainly to document how to work with the data structure returned by
scipy.io.loadmat().
Args:
filename: the name of the MAT file to load.
Returns:
A nested dictionary containing the instrument data as
specified in the LabOne User Interface. The nested structure of ``data``
corresponds to the path of the data's node in the instrument's node
hierarchy.
Further comments:
The MAT file saved by the LabOne User Interface (UI) is a Matlab V5.0 data
file. The LabOne UI saves the specified data using native Matlab data
structures in the same format as are returned by commands in the LabOne
Matlab API. More specifically, these data structures are nested Matlab
    structs, the nested structure of which corresponds to the location of the
data in the instrument's node hierarchy.
Matlab structs are returned by scipy.io.loadmat() as dictionaries, the
name of the struct becomes a key in the dictionary. However, as for all
objects in MATLAB, structs are in fact arrays of structs, where a single
struct is an array of shape (1, 1). This means that each (nested)
dictionary that is returned (corresponding to a node in node hierarchy) is
loaded by scipy.io.loadmat as a 1-by-1 array and must be indexed as
such. See the ``Example`` section below.
For more information please refer to the following link:
http://docs.scipy.org/doc/scipy/reference/tutorial/io.html#matlab-structs
Example:
```python
device = 'dev88'
    # See ``Further comments`` above for a comment on the indexing:
timestamp = data[device][0,0]['demods'][0,0]['sample'][0,0]['timestamp'][0]
x = data[device][0,0]['demods'][0,0]['sample'][0,0]['x'][0]
y = data[device][0,0]['demods'][0,0]['sample'][0,0]['y'][0]
import matplotlib.pyplot as plt
import numpy as np
plt.plot(timestamp, np.abs(x + 1j*y))
    # If multiple demodulators are saved, data from the second demodulator,
    # e.g., is accessed as follows:
x = data[device][0,0]['demods'][0,1]['sample'][0,0]['x'][0]
```
"""
try:
data = scipy.io.loadmat(filename)
return data
except (NameError, AttributeError):
print(
"\n\n *** Please install the ``scipy`` package and verify you can use "
"scipy.io.loadmat() in order to use zhinst.utils.load_labone_mat. *** \n\n"
)
print(
"Whilst calling import scipy.io an exception was raised with the message: ",
str(__SCIPY_IMPORT_ERROR),
)
print("Whilst calling scipy.io.loadmat() the following exception was raised:")
raise
except Exception as e:
print("Unexpected exception", str(e))
raise
def load_zicontrol_csv(
filename: str, column_names: t.List[str] = ZICONTROL_NAMES
) -> np.ndarray:
"""Load a CSV file containing demodulator samples.
Load a CSV file containing demodulator samples as saved by the ziControl
User Interface into a numpy structured array.
Args:
filename: The file or filename of the CSV file to load.
column_names: A list (or tuple) of column names (demodulator sample
field names) to load from the CSV file. Default is to load all columns.
Returns:
sample: A numpy structured array of shape (num_points,)
whose field names correspond to the field names of a ziControl demodulator
sample. num_points is the number of lines in the CSV file - 1.
Example:
```python
import zhinst.utils
    import matplotlib.pyplot as plt
    import numpy as np
    sample = zhinst.utils.load_zicontrol_csv('Freq1.csv', ('t', 'x', 'y'))
plt.plot(sample['t'], np.abs(sample['x'] + 1j*sample['y']))
```
"""
assert set(column_names).issubset(
ZICONTROL_NAMES
), "Invalid name in ``column_names``, valid names are: %s" % str(ZICONTROL_NAMES)
cols = [
col for col, dtype in enumerate(ZICONTROL_DTYPE) if dtype[0] in column_names
]
dtype = [dt for dt in ZICONTROL_DTYPE if dt[0] in column_names]
sample = np.genfromtxt(filename, delimiter=",", dtype=dtype, usecols=cols)
return sample
def load_zicontrol_zibin(
filename: str, column_names: t.List[str] = ZICONTROL_NAMES
) -> np.ndarray:
"""Load a ziBin file containing demodulator samples.
Load a ziBin file containing demodulator samples as saved by the ziControl
User Interface into a numpy structured array. This is for data saved by
ziControl in binary format.
Note:
        Specifying fewer names in ``column_names`` will not result in a
speed-up as all data is loaded from the binary file by default.
Args:
filename: The filename of the .ziBin file to load.
column_names: A list (or tuple) of column names to load from the CSV
file. Default is to load all columns.
Returns:
A numpy structured array of shape (num_points,) whose field names
correspond to the field names of a ziControl demodulator sample.
num_points is the number of sample points saved in the file.
Example:
```python
import zhinst.utils
sample = zhinst.utils.load_zicontrol_zibin('Freq1.ziBin')
    import matplotlib.pyplot as plt
import numpy as np
plt.plot(sample['t'], np.abs(sample['x'] + 1j*sample['y']))
```
"""
assert set(column_names).issubset(
ZICONTROL_NAMES
), "Invalid name in ``column_names``, valid names are: %s." % str(ZICONTROL_NAMES)
sample = np.fromfile(filename, dtype=">f8")
rem = np.size(sample) % len(ZICONTROL_NAMES)
assert rem == 0, str(
"Incorrect number of data points in ziBin file, the number of data points "
"must be divisible by the number of demodulator fields."
)
    n = np.size(sample) // len(ZICONTROL_NAMES)  # integer count required by reshape
sample = np.reshape(sample, (n, len(ZICONTROL_NAMES))).transpose()
cols = [
col for col, dtype in enumerate(ZICONTROL_DTYPE) if dtype[0] in column_names
]
dtype = [dt for dt in ZICONTROL_DTYPE if dt[0] in column_names]
    sample = np.rec.fromarrays(sample[cols, :], dtype=dtype)
return sample
def check_for_sampleloss(timestamps: np.ndarray) -> np.ndarray:
"""Check for sample loss.
    Check whether timestamps are equidistantly spaced; if not, it is an
    indication that sample loss has occurred while recording the demodulator
    data.
This function assumes that the timestamps originate from continuously saved
demodulator data, during which the demodulator sampling rate was not
changed.
Args:
timestamps: a 1-dimensional array containing demodulator timestamps
Returns:
        A 1-dimensional array indicating the indices in timestamps where
        sample loss has occurred. An empty array is returned if no sample loss
        was present.
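    Example:
        A minimal sketch with synthetic timestamps; samples 6..15 are missing,
        so an index pointing at the gap is returned (and a warning is issued):
        ```python
        import numpy as np
        import zhinst.utils
        timestamps = np.concatenate((np.arange(0, 6), np.arange(16, 26)))
        indices = zhinst.utils.check_for_sampleloss(timestamps)
        ```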
"""
    # If the second difference of the timestamps is zero, no sample loss has occurred
index = np.where(np.diff(timestamps, n=2) > 0.1)[0] + 1
# Find the true dtimestamps (determined by the configured sampling rate)
dtimestamp = np.nan
for i in range(0, np.shape(timestamps)[0]):
# Take the sampling rate from a point where sample loss has not
# occurred.
if i not in index:
dtimestamp = timestamps[i + 1] - timestamps[i]
break
assert not np.isnan(dtimestamp)
for i in index:
warnings.warn(
"Sample loss detected at timestamps={} (index: {}, {} points).".format(
timestamps[i], i, (timestamps[i + 1] - timestamps[i]) / dtimestamp
)
)
return index
def bwtc_scaling_factor(order: int) -> float:
"""Return the appropriate scaling factor for bandwidth to timeconstant.
Conversion for the provided demodulator order.
Args:
order: demodulator order.
Returns:
Scaling factor for the bandwidth to timeconstant.
"""
if order == 1:
return 1.0
if order == 2:
return 0.643594
if order == 3:
return 0.509825
if order == 4:
return 0.434979
if order == 5:
return 0.385614
if order == 6:
return 0.349946
if order == 7:
return 0.322629
if order == 8:
return 0.300845
raise RuntimeError("Error: Order (%d) must be between 1 and 8.\n" % order)
def bw2tc(bandwidth: float, order: int) -> float:
"""Convert the demodulator 3 dB bandwidth to its equivalent timeconstant.
Args:
bandwidth: The demodulator 3dB bandwidth to convert.
order: The demodulator order (1 to 8) for which to convert the bandwidth.
Returns:
The equivalent demodulator timeconstant.
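    Example:
        A worked example: a 4th-order filter with a 100 Hz 3 dB bandwidth has
        a timeconstant of 0.434979 / (2 * pi * 100) ~= 692.3 us:
        ```python
        import zhinst.utils
        tau = zhinst.utils.bw2tc(100.0, 4)
        ```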
"""
return bwtc_scaling_factor(order) / (2 * np.pi * bandwidth)
def tc2bw(timeconstant: float, order: int) -> float:
"""Convert the demodulator timeconstant to its equivalent 3 dB bandwidth.
Args:
        timeconstant: The demodulator timeconstant to convert.
        order: The demodulator order (1 to 8) for which to convert the
            timeconstant.
    Returns:
        The equivalent demodulator 3 dB bandwidth.
"""
return bwtc_scaling_factor(order) / (2 * np.pi * timeconstant)
def systemtime_to_datetime(systemtime: int) -> datetime.datetime:
"""Convert the LabOne "systemtime" into a datetime object.
Convert the LabOne "systemtime" returned in LabOne data headers from
microseconds since Unix epoch to a datetime object with microsecond
precision.
Args:
        systemtime: The LabOne "systemtime" in microseconds since the Unix epoch.
Returns:
datetime object.
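    Example:
        A minimal sketch; the argument is an illustrative number of
        microseconds since the Unix epoch (the local timezone applies):
        ```python
        import zhinst.utils
        dt = zhinst.utils.systemtime_to_datetime(1600000000000000)
        ```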
"""
systemtime_sec, systemtime_microsec = divmod(systemtime, 1e6)
# Create a datetime object from epoch timestamp with 0 microseconds.
time_formated = datetime.datetime.fromtimestamp(systemtime_sec)
# Set the number of microseconds in the datetime object.
return time_formated.replace(microsecond=int(systemtime_microsec))
def disable_everything(daq: zi.ziDAQServer, device: str) -> t.List[t.Tuple[str, int]]:
"""Put the device in a known base configuration.
    Disables all extended functionality and all streaming nodes.
Args:
daq: An instance of the core.ziDAQServer class
(representing an API session connected to a Data Server).
device: The device ID specifying where to load the settings,
e.g., 'dev123'.
Returns:
        A list of lists as provided to ziDAQServer's set() command. Each
        sub-list forms a (node path, value) pair. This is the list of nodes
        configured by the function and may be reused.
Warning:
        This function is intended as a helper function for the API's examples
        and its signature or implementation may change in future releases.
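    Example:
        A minimal sketch using the connection helpers from this module:
        ```python
        import zhinst.utils
        daq = zhinst.utils.autoConnect()
        dev = zhinst.utils.autoDetect(daq)
        settings = zhinst.utils.disable_everything(daq, dev)
        ```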
"""
node_branches = daq.listNodes("/{}/".format(device), 0)
settings = []
if node_branches == []:
print("Device", device, "is not connected to the data server.")
return settings
if "aucarts" in (node.lower() for node in node_branches):
settings.append(["/{}/aucarts/*/enable".format(device), 0])
if "aupolars" in (node.lower() for node in node_branches):
settings.append(["/{}/aupolars/*/enable".format(device), 0])
if "awgs" in (node.lower() for node in node_branches):
settings.append(["/{}/awgs/*/enable".format(device), 0])
if "boxcars" in (node.lower() for node in node_branches):
settings.append(["/{}/boxcars/*/enable".format(device), 0])
if "cnts" in (node.lower() for node in node_branches):
settings.append(["/{}/cnts/*/enable".format(device), 0])
# CURRINS
if daq.listNodes("/{}/currins/0/float".format(device), 0) != []:
settings.append(["/{}/currins/*/float".format(device), 0])
if "dios" in (node.lower() for node in node_branches):
settings.append(["/{}/dios/*/drive".format(device), 0])
if "demods" in (node.lower() for node in node_branches):
settings.append(["/{}/demods/*/enable".format(device), 0])
settings.append(["/{}/demods/*/trigger".format(device), 0])
settings.append(["/{}/demods/*/sinc".format(device), 0])
settings.append(["/{}/demods/*/oscselect".format(device), 0])
settings.append(["/{}/demods/*/harmonic".format(device), 1])
settings.append(["/{}/demods/*/phaseshift".format(device), 0])
if "extrefs" in (node.lower() for node in node_branches):
settings.append(["/{}/extrefs/*/enable".format(device), 0])
if "imps" in (node.lower() for node in node_branches):
settings.append(["/{}/imps/*/enable".format(device), 0])
if "inputpwas" in (node.lower() for node in node_branches):
settings.append(["/{}/inputpwas/*/enable".format(device), 0])
if daq.listNodes("/{}/mods/0/enable".format(device), 0) != []:
# HF2 without the MOD Option has an empty MODS branch.
settings.append(["/{}/mods/*/enable".format(device), 0])
if "outputpwas" in (node.lower() for node in node_branches):
settings.append(["/{}/outputpwas/*/enable".format(device), 0])
if daq.listNodes("/{}/pids/0/enable".format(device), 0) != []:
# HF2 without the PID Option has an empty PID branch.
settings.append(["/{}/pids/*/enable".format(device), 0])
if daq.listNodes("/{}/plls/0/enable".format(device), 0) != []:
# HF2 without the PLL Option still has the PLLS branch.
settings.append(["/{}/plls/*/enable".format(device), 0])
if "sigins" in (node.lower() for node in node_branches):
settings.append(["/{}/sigins/*/ac".format(device), 0])
settings.append(["/{}/sigins/*/imp50".format(device), 0])
sigins_children = daq.listNodes("/{}/sigins/0/".format(device), 0)
for leaf in ["diff", "float"]:
if leaf in (node.lower() for node in sigins_children):
settings.append(["/{}/sigins/*/{}".format(device, leaf.lower()), 0])
if "sigouts" in (node.lower() for node in node_branches):
settings.append(["/{}/sigouts/*/on".format(device), 0])
settings.append(["/{}/sigouts/*/enables/*".format(device), 0])
settings.append(["/{}/sigouts/*/offset".format(device), 0.0])
sigouts_children = daq.listNodes("/{}/sigouts/0/".format(device), 0)
for leaf in ["add", "diff", "imp50"]:
if leaf in (node.lower() for node in sigouts_children):
settings.append(["/{}/sigouts/*/{}".format(device, leaf.lower()), 0])
if "precompensation" in (node.lower() for node in sigouts_children):
settings.append(["/{}/sigouts/*/precompensation/enable".format(device), 0])
settings.append(
["/{}/sigouts/*/precompensation/highpass/*/enable".format(device), 0]
)
settings.append(
[
"/{}/sigouts/*/precompensation/exponentials/*/enable".format(
device
),
0,
]
)
settings.append(
["/{}/sigouts/*/precompensation/bounces/*/enable".format(device), 0]
)
settings.append(
["/{}/sigouts/*/precompensation/fir/enable".format(device), 0]
)
if "scopes" in (node.lower() for node in node_branches):
settings.append(["/{}/scopes/*/enable".format(device), 0])
if daq.listNodes("/{}/scopes/0/segments/enable".format(device), 0) != []:
settings.append(["/{}/scopes/*/segments/enable".format(device), 0])
if daq.listNodes("/{}/scopes/0/stream/enables/0".format(device), 0) != []:
settings.append(["/{}/scopes/*/stream/enables/*".format(device), 0])
if "triggers" in (node.lower() for node in node_branches):
settings.append(["/{}/triggers/out/*/drive".format(device), 0])
daq.set(settings)
daq.sync()
return settings
def convert_awg_waveform(
wave1: np.ndarray, wave2: np.ndarray = None, markers: np.ndarray = None
) -> np.ndarray:
"""Convert one or multiple waveforms into the native AWG waveform format.
Converts one or multiple arrays with waveform data to the native AWG
waveform format (interleaved waves and markers as uint16).
Waveform data can be provided as integer (no conversion) or floating point
(range -1 to 1) arrays.
Args:
wave1: Array with data of waveform 1.
wave2: Array with data of waveform 2.
markers: Array with marker data.
Returns:
The converted uint16 waveform is returned.
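    Example:
        A minimal sketch with synthetic data, interleaving two waves and a
        marker channel into the native uint16 format:
        ```python
        import numpy as np
        import zhinst.utils
        x = np.linspace(0, 2 * np.pi, 1024)
        wave1 = np.sin(x)
        wave2 = np.cos(x)
        markers = np.zeros(1024, dtype=int)
        markers[:128] = 1  # raise marker bit 0 for the first 128 samples
        raw = zhinst.utils.convert_awg_waveform(wave1, wave2, markers)
        ```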
"""
wave2_uint = None
marker_uint = None
mode = 0
# Prepare waveforms
def uint16_waveform(wave):
wave = np.asarray(wave)
if np.issubdtype(wave.dtype, np.floating):
return np.asarray((np.power(2, 15) - 1) * wave, dtype=np.uint16)
return np.asarray(wave, dtype=np.uint16)
wave1_uint = uint16_waveform(wave1)
mode += 1
if wave2 is not None:
if len(wave2) != len(wave1):
            raise ValueError(
                "wave1 and wave2 have different lengths; they must have the "
                "same length."
)
wave2_uint = uint16_waveform(wave2)
mode += 2
if markers is not None:
if len(markers) != len(wave1):
            raise ValueError(
                "wave1 and markers have different lengths; they must have the "
                "same length."
)
marker_uint = np.array(markers, dtype=np.uint16)
mode += 4
# Merge waveforms
waveform_data = None
if mode == 1:
waveform_data = wave1_uint
elif mode == 3:
waveform_data = np.vstack((wave1_uint, wave2_uint)).reshape((-2,), order="F")
elif mode == 4:
waveform_data = marker_uint
elif mode == 5:
waveform_data = np.vstack((wave1_uint, marker_uint)).reshape((-2,), order="F")
elif mode == 6:
waveform_data = np.vstack((wave2_uint, marker_uint)).reshape((-2,), order="F")
elif mode == 7:
waveform_data = np.vstack((wave1_uint, wave2_uint, marker_uint)).reshape(
(-2,), order="F"
)
else:
waveform_data = []
return waveform_data
def parse_awg_waveform(
wave_uint: np.ndarray, channels: int = 1, markers_present: bool = False
) -> t.Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Convert a native AWG waveform into the individual waves.
Converts a received waveform from the AWG waveform node into floating point
and separates its contents into the respective waves (2 waveform waves and 1
marker wave), depending on the input.
Args:
wave_uint: A uint16 array from the AWG waveform node.
channels: Number of channels present in the wave.
markers_present: Indicates if markers are interleaved in the wave.
Returns:
        Three separated arrays are returned. The waveforms are scaled to be in
        the range [-1, 1]. If no data is present the respective array is empty.
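    Example:
        A minimal sketch that round-trips two synthetic waves through the
        native format using convert_awg_waveform from this module:
        ```python
        import numpy as np
        import zhinst.utils
        x = np.linspace(0, 2 * np.pi, 1024)
        raw = zhinst.utils.convert_awg_waveform(np.sin(x), np.cos(x))
        waves = zhinst.utils.parse_awg_waveform(raw, channels=2)
        ```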
"""
from collections import namedtuple
# convert uint16 to int16
wave_int = np.array(wave_uint, dtype=np.int16)
parsed_waves = namedtuple("deinterleaved_waves", ["wave1", "wave2", "markers"])
wave1 = []
wave2 = []
markers = []
interleaved_frames = channels
if markers_present:
interleaved_frames += 1
deinterleaved = [
wave_int[idx::interleaved_frames] for idx in range(interleaved_frames)
]
deinterleaved[0] = deinterleaved[0] / (np.power(2, 15) - 1)
if channels == 2:
deinterleaved[1] = deinterleaved[1] / (np.power(2, 15) - 1)
wave1 = deinterleaved[0]
if channels == 2:
wave2 = deinterleaved[1]
if markers_present:
markers = deinterleaved[-1]
return parsed_waves(wave1, wave2, markers)
def wait_for_state_change(
daq: zi.ziDAQServer,
node: str,
value: int,
timeout: float = 1.0,
sleep_time: float = 0.005,
) -> None:
"""Waits until a node has the expected state/value.
Attention: Only supports integer values as reference.
Args:
daq: A core API session.
node: Path of the node.
value: expected value.
        timeout: maximum time to wait in seconds. (default = 1.0)
sleep_time: sleep interval in seconds. (default = 0.005)
Raises:
        TimeoutError: If the node did not change to the expected value within
the given time.
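    Example:
        A minimal sketch; ``daq`` is assumed to be a connected API session and
        'dev1234' is an illustrative device ID:
        ```python
        import zhinst.utils
        daq.setInt("/dev1234/awgs/0/single", 1)
        daq.setInt("/dev1234/awgs/0/enable", 1)
        # Wait for the single-shot AWG run to finish (enable returns to 0).
        zhinst.utils.wait_for_state_change(
            daq, "/dev1234/awgs/0/enable", 0, timeout=10.0
        )
        ```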
"""
start_time = time.time()
while start_time + timeout >= time.time() and daq.getInt(node) != value:
time.sleep(sleep_time)
if daq.getInt(node) != value:
raise TimeoutError(
f"{node} did not change to expected value {value} within "
f"{timeout} seconds."
)
def assert_node_changes_to_expected_value(
daq: zi.ziDAQServer,
node: str,
expected_value: t.Union[int, float, str],
sleep_time: float = 0.005,
max_repetitions: int = 200,
) -> None:
"""Polls a node until it has the the expected value.
If the node didn't change to the expected value within the maximum number
of polls an assertion error is issued.
Args:
daq: A core API session.
node: path of the node that should change to expected value
expected_value: value the node is expected to change to
        sleep_time: time in seconds to wait between requesting the value
max_repetitions: max. number of loops we wait for the node to change
Raises:
AssertionError: If the node doesn't change to the expected value within
the given time.
"""
warnings.warn(
"assert_node_changes_to_expected_value is deprecated please use "
+ "wait_for_state_change instead.",
DeprecationWarning,
stacklevel=2,
)
daq.sync()
for _ in range(max_repetitions):
readback_value = daq.getInt(node)
if readback_value == expected_value:
break
time.sleep(sleep_time)
assert readback_value == expected_value, (
"Node '{}' did not return {} (but returned {}) within {} sec."
).format(node, expected_value, readback_value, max_repetitions * sleep_time)
def volt_rms_to_dbm(
volt_rms: t.Union[float, t.List[float]], input_impedance_ohm: int = 50
) -> t.Union[float, t.List[float]]:
"""Converts a Root Mean Square (RMS) voltage into a dBm power value.
Args:
volt_rms: The RMS voltage to be converted
input_impedance_ohm: The input impedance in Ohm
Returns:
The power in dBm corresponding to the volt_rms argument is returned.
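    Example:
        A worked example: 0.1 V RMS into 50 Ohm corresponds to
        10 * log10(0.1**2 * 1e3 / 50) = 10 * log10(0.2) ~= -6.99 dBm:
        ```python
        import zhinst.utils
        power_dbm = zhinst.utils.volt_rms_to_dbm(0.1)
        ```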
"""
return 10 * np.log10((np.abs(volt_rms) ** 2) * 1e3 / input_impedance_ohm) | zhinst-utils | /zhinst_utils-0.3.3-py3-none-any.whl/zhinst/utils/utils.py | utils.py |
from collections import namedtuple
from dataclasses import dataclass
from enum import Enum, auto
import time
import textwrap
import math
import numpy as np
from zhinst.utils import utils, shfqa
from zhinst.core import compile_seqc
import typing as t
class _Mapping(Enum):
LIN = "linear"
LOG = "log"
class _AveragingMode(Enum):
CYCLIC = "cyclic"
SEQUENTIAL = "sequential"
class _TriggerSource(Enum):
"""
Valid trigger sources for spectroscopy
Note: the user should write the trigger selection in lowercase letters.
e.g. "software_trigger0". The strings are transformed to uppercase only
for this enum, which is needed to distinguish between internal and external
triggers (see _EXTERNAL_TRIGGER_LIMIT).
"""
CHANNEL0_TRIGGER_INPUT0 = 0 # Important: start counting with 0
CHAN0TRIGIN0 = CHANNEL0_TRIGGER_INPUT0
CHANNEL0_TRIGGER_INPUT1 = auto()
CHAN0TRIGIN1 = CHANNEL0_TRIGGER_INPUT1
CHANNEL1_TRIGGER_INPUT0 = auto()
CHAN1TRIGIN0 = CHANNEL1_TRIGGER_INPUT0
CHANNEL1_TRIGGER_INPUT1 = auto()
CHAN1TRIGIN1 = CHANNEL1_TRIGGER_INPUT1
CHANNEL2_TRIGGER_INPUT0 = auto()
CHAN2TRIGIN0 = CHANNEL2_TRIGGER_INPUT0
CHANNEL2_TRIGGER_INPUT1 = auto()
CHAN2TRIGIN1 = CHANNEL2_TRIGGER_INPUT1
CHANNEL3_TRIGGER_INPUT0 = auto()
CHAN3TRIGIN0 = CHANNEL3_TRIGGER_INPUT0
CHANNEL3_TRIGGER_INPUT1 = auto()
CHAN3TRIGIN1 = CHANNEL3_TRIGGER_INPUT1
CHANNEL0_SEQUENCER_TRIGGER0 = auto()
CHAN0SEQTRIG0 = CHANNEL0_SEQUENCER_TRIGGER0
CHANNEL1_SEQUENCER_TRIGGER0 = auto()
CHAN1SEQTRIG0 = CHANNEL1_SEQUENCER_TRIGGER0
CHANNEL2_SEQUENCER_TRIGGER0 = auto()
CHAN2SEQTRIG0 = CHANNEL2_SEQUENCER_TRIGGER0
CHANNEL3_SEQUENCER_TRIGGER0 = auto()
CHAN3SEQTRIG0 = CHANNEL3_SEQUENCER_TRIGGER0
SOFTWARE_TRIGGER0 = auto()
SWTRIG0 = SOFTWARE_TRIGGER0
INTERNAL_TRIGGER = auto()
INTTRIG = INTERNAL_TRIGGER
_EXTERNAL_TRIGGER_LIMIT = _TriggerSource.CHANNEL3_TRIGGER_INPUT1
_SHF_SAMPLE_RATE = 2e9
_MIN_SETTLING_TIME = 80e-9
_MAX_PLAYZERO_CYCLES = 2**30 - 16
_MAX_PLAYZERO_TIME = _MAX_PLAYZERO_CYCLES / _SHF_SAMPLE_RATE
def _check_trigger_source(trigger):
"""
Checks whether the trigger source exists in the _TriggerSource enumeration
Raises a ValueError exception if the checked setting was invalid.
Arguments:
trigger: the trigger source setting to be checked
"""
    try:
        _TriggerSource[trigger.upper()]
    except KeyError as error:
        # Note: name lookup on an Enum raises KeyError, which is converted
        # here into the ValueError promised by the docstring.
        raise ValueError(
            "Trigger source needs to be 'channel[0,3]_trigger_input[0,1]', "
            "'channel[0,3]_sequencer_trigger0' or 'software_trigger0'."
        ) from error
def _check_channel_index(daq, device_id, channel_index):
"""
Checks whether the provided channel index is valid
Raises a ValueError exception if the checked setting was invalid.
    Arguments:
        daq (ziDAQServer): an instance of the core.ziDAQServer class
        device_id: the ID of the device to be checked
        channel_index: index of the qachannel to be checked
"""
device_type = daq.getString(f"/{device_id}/features/devtype")
if device_type == "SHFQA4":
num_qa_channels = 4
elif device_type == "SHFQA2":
num_qa_channels = 2
else:
# SHFQC
num_qa_channels = 1
if channel_index >= num_qa_channels:
raise ValueError(
f"Device {device_id} only has a total of {num_qa_channels} QA channels."
)
def _check_center_freq(center_freq_hz):
"""
Checks whether the center frequency is within the valid range
Raises a ValueError exception if the checked setting was invalid.
Arguments:
center_freq_hz: the center frequency to be checked in units Hz
"""
min_center_freq = 0
max_center_freq = 8e9
center_freq_steps = 100e6
rounding_error = 0.1
if center_freq_hz < min_center_freq:
raise ValueError(f"Center frequency must be greater than {min_center_freq}Hz.")
if center_freq_hz > max_center_freq:
raise ValueError(f"Center frequency must be less than {max_center_freq}Hz.")
if center_freq_hz % center_freq_steps > rounding_error:
raise ValueError(f"Center frequency must be multiple of {center_freq_steps}Hz.")
def _check_in_band_freq(start_freq, stop_freq):
"""
Checks whether the start/stop frequency for the in-band sweep is in the valid
range
Raises a ValueError exception if the checked setting was invalid.
Arguments:
        start_freq: the lower bound of the in-band sweep to be checked, in Hz
        stop_freq: the upper bound of the in-band sweep to be checked, in Hz
"""
min_offset_freq = -1e9
max_offset_freq = 1e9
if start_freq >= stop_freq:
raise ValueError("Stop frequency must be larger than start_freq frequency.")
if start_freq < min_offset_freq:
raise ValueError(f"Start frequency must be greater than {min_offset_freq}Hz.")
if stop_freq > max_offset_freq:
raise ValueError(f"Stop frequency must be less than {max_offset_freq}Hz.")
def _check_io_range(range_dbm, min_range):
"""
Checks whether the supplied input or output range setting is within the device
boundaries
Raises a ValueError exception if the checked setting was invalid.
Arguments:
range_dbm: the range setting to be checked in units of dBm
min_range: lower boundary
"""
max_range = 10
range_step = 5
rounding_error = 0.001
if range_dbm > max_range + rounding_error:
raise ValueError(f"Maximum range is {max_range}dBm.")
if range_dbm < min_range - rounding_error:
raise ValueError(f"Minimum range is {min_range}dBm.")
if range_dbm % range_step > rounding_error:
raise ValueError(f"Range must be multiple of {range_step}dBm.")
def _check_output_range(range_dbm):
"""
Checks whether the supplied output range setting is within the device boundaries
Raises a ValueError exception if the checked setting was invalid.
Arguments:
range_dbm: the range setting to be checked in units of dBm
"""
min_range_output = -30
_check_io_range(range_dbm, min_range_output)
def _check_input_range(range_dbm):
"""
    Checks whether the supplied input range setting is within the device boundaries
Raises a ValueError exception if the checked setting was invalid.
Arguments:
range_dbm: the range setting to be checked in units of dBm
"""
min_range_input = -50
_check_io_range(range_dbm, min_range_input)
def _check_output_gain(gain):
"""
Checks whether the supplied output gain setting is within the device boundaries
Raises a ValueError exception if the checked setting was invalid.
Arguments:
gain: the gain setting to be checked
"""
max_gain = 1
min_gain = 0
if gain < min_gain or gain > max_gain:
raise ValueError(f"Output gain must be within [{min_gain}, {max_gain}].")
def _check_settling_time(settling_time):
"""
Checks whether the settling time is within the acceptable range.
Raises a ValueError exception if the checked setting was invalid.
Arguments:
settling_time: the settling time setting to be checked
"""
if settling_time < _MIN_SETTLING_TIME:
raise ValueError(
f"Settling time {settling_time} s smaller than minimum allowed value: {_MIN_SETTLING_TIME} s!"
)
if settling_time > _MAX_PLAYZERO_TIME:
raise ValueError(
f"Settling time {settling_time} s greater than maximum allowed value: {_MAX_PLAYZERO_TIME} s!"
)
def _check_wait_after_integration(wait_after_integration):
"""
Checks whether the wait time after integration is within the acceptable range.
Raises a ValueError exception if the checked setting was invalid.
Arguments:
wait_after_integration: the wait time setting to be checked
"""
if wait_after_integration < 0:
raise ValueError(
f"Wait time after integration {wait_after_integration} s"
" smaller than zero!"
)
if wait_after_integration > _MAX_PLAYZERO_TIME:
raise ValueError(
f"Wait time after integration {wait_after_integration} s"
f" greater than maximum allowed value: {_MAX_PLAYZERO_TIME} s!"
)
def _check_envelope_waveform(wave_vector):
"""
    Checks whether the supplied vector is a valid envelope waveform.
Raises a ValueError exception if the checked setting was invalid.
Arguments:
wave_vector: the waveform vector to be checked
"""
if wave_vector is None:
raise ValueError("No envelope waveform specified.")
max_envelope_length = 2**16
if len(wave_vector) > max_envelope_length:
raise ValueError(
f"Envelope length exceeds maximum of {max_envelope_length} samples."
)
# Note: here, we check that the envelope vector elements are within the unit
# circle. This check is repeated by the envelope/wave node but it is
# stated here explicitly as a guidance to the user.
if np.any(np.abs(wave_vector) > 1.0):
raise ValueError(
"The absolute value of each envelope vector element must be smaller "
"than 1."
)
def _check_mapping(mapping):
"""
    Checks whether the supplied mapping is a valid setting
Raises a ValueError exception if the checked setting was invalid.
Arguments:
mapping: the setting to be checked
"""
    try:
        _Mapping(mapping.lower())
    except ValueError as error:
        raise ValueError("Mapping needs to be 'linear' or 'log'.") from error
def _check_avg_mode(mode):
"""
Checks whether the average mode is a valid setting
Raises a ValueError exception if the checked setting was invalid.
Arguments:
mode: the setting to be checked
"""
    try:
        _AveragingMode(mode.lower())
    except ValueError as error:
        raise ValueError(
            "Averaging mode needs to be 'cyclic' or 'sequential'."
        ) from error
def _print_sweep_progress(current, total, freq, newline=False):
"""
Prints a line indicating the sweep progress
Arguments:
current: the current number of measurements
total: the total number of measurements
freq: the current frequency
newline: specifies whether to print a newline (True)
or else a carriage return (False) at the end of the line
"""
print(
f"Measurement ({current}/{total}) at {(freq / 1e6):.3f}MHz." + " " * 20,
end=("\r" if not newline else "\n"),
)
def _round_for_playzero(time_interval: float, sample_rate: float):
"""
Rounds a time interval to the granularity of the playZero SeqC command
Arguments:
time_interval: the time interval to be rounded for the playZero command
sample_rate: the sample rate of the instrument
    Returns:
        the time interval rounded up to the playZero granularity
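    Example:
        A worked example: 100 ns at 2 GSa/s corresponds to 200 samples, which
        rounds up to 208 samples (the next multiple of 16), i.e. 104 ns:
        ```python
        rounded = _round_for_playzero(100e-9, sample_rate=2e9)
        ```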
"""
playzero_granularity = 16
# round up the number of samples to multiples of playzero_granularity
num_samples = (
        (round(time_interval * sample_rate) + (playzero_granularity - 1))
// playzero_granularity
) * playzero_granularity
return num_samples / sample_rate
def _is_subscribed(daq, node_path: str) -> bool:
"""
Checks whether the daq instance is subscribed to a given node or not
Arguments:
daq (ziDAQServer): an instance of the core.ziDAQServer class
node_path: the path of the node to be checked
Returns:
True if the node is subscribed, False if not
"""
# NOTE: currently, daq.listNodes will not respect the subscribedonly flag when the
# node path does not contain a wildcard. Thus we work around this problem by
# determining the base path of the node and using a wildcard.
# Remove this workaround once the underlying bug L1-864 is fixed.
wildcard_path = "/".join(node_path.split("/")[:-1]) + "/*"
listed_nodes = daq.listNodes(wildcard_path, subscribedonly=True)
return node_path in listed_nodes
def _subscribe_with_assert(daq, node_path: str) -> None:
"""
Subscribes to a node only if it was not already subscribed
Raises an AssertionError if the node was already subscribed
Arguments:
daq (ziDAQServer): an instance of the core.ziDAQServer class
node_path: the path of the node to be checked
"""
assert not _is_subscribed(daq, node_path), (
"The following node was already subscribed:\n"
+ node_path
+ "\n"
+ "This would lead to unexpected behavior!"
)
daq.subscribe(node_path)
@dataclass
class SweepConfig:
"""Frequency range settings for a sweep"""
start_freq: float = -300e6 #: minimum frequency for the sweep
stop_freq: float = 300e6 #: maximum frequency for the sweep
num_points: int = 100 #: number of frequency points to measure
mapping: str = "linear" #: linear or logarithmic frequency axis
oscillator_gain: float = 1 #: amplitude gain for the oscillator used for modulation
settling_time: float = _MIN_SETTLING_TIME
"""time to wait to ensure new frequency took effect in the device under test"""
wait_after_integration: float = 0.0
"""time to wait after the integration finished until the next frequency is set"""
use_sequencer: bool = True
"""specify whether to use the fast sequencer-based sweep (True) or the slower
host-driven sweep (False)"""
psd: bool = False
"""specify whether to compute the Power Spectral Density (PSD)"""
@dataclass
class RfConfig:
"""RF in- and output settings for a sweep"""
channel: int = 0 #: device channel to be used
    input_range: int = -5  #: maximal range of the signal input power in dBm
    output_range: int = 0  #: maximal range of the signal output power in dBm
center_freq: float = 5e9 #: Center Frequency of the analysis band
@dataclass
class AvgConfig:
"""Averaging settings for a sweep"""
integration_time: float = 1e-3 #: total time while samples are integrated
num_averages: int = 1 #: times to measure each frequency point
mode: str = "cyclic"
"""averaging mode, which can be "cyclic", to first scan the frequency and then
repeat, or "sequential", to average each point before changing the frequency"""
integration_delay: float = 224.0e-9
"""time delay after the trigger for the integrator to start"""
@dataclass
class TriggerConfig:
"""Settings for the trigger"""
    source: t.Optional[str] = None
"""trigger source. Please refer to the node documentation in the user manual under
/DEV.../QACHANNELS/n/GENERATOR/AUXTRIGGERS/n/CHANNEL for a list of possible sources.
The default source (None) means the repetition rate of the experiment will be
determined by the sequencer using the integration time in AvgConfig and settling
time in SweepConfig.
Further note that the software trigger is not supported for the sequencer-based
sweeps (exception see force_sw_trigger)!"""
level: float = 0.5 #: trigger level
imp50: bool = True #: trigger input impedance - 50 Ohm if True; else high impedance
force_sw_trigger: bool = False
"""if True, the sequencer program waits for the software trigger even in
sequencer-based mode. Note, however, that the ShfSweeper python class will not
generate the software trigger on its own. Thus this mode is only useful if a
separate API session issues the software triggers!"""
@dataclass
class EnvelopeConfig:
"""Settings for defining a complex envelope for pulsed spectroscopy"""
    waveform: t.Optional[np.ndarray] = None  #: the complex envelope waveform vector
    delay: float = 0.0  #: time delay with which the waveform is generated after the trigger
Config = namedtuple("Config", ["sweep", "avg", "rf", "trig"])
# pylint: disable=too-many-instance-attributes
class ShfSweeper:
"""
Class to set up and run a sweep on an SHFQA
Arguments:
daq (zhinst.core.ziDAQServer):
ziDAQServer object to communicate with a Zurich Instruments data server
dev (str):
The ID of the device to run the sweeper with. For example, `dev12004`.
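    Example:
        A minimal sketch; ``daq`` is assumed to be a connected ziDAQServer
        session and the device ID is illustrative:
        ```python
        sweeper = ShfSweeper(daq, "dev12004")
        sweeper.configure(
            sweep_config=SweepConfig(start_freq=-200e6, stop_freq=200e6),
            avg_config=AvgConfig(integration_time=100e-6, num_averages=2),
            rf_config=RfConfig(channel=0, center_freq=5e9),
        )
        result = sweeper.run()
        sweeper.plot()
        ```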
"""
def __init__(self, daq, dev):
self._daq = daq
self._dev = dev
self._sweep = SweepConfig()
self._rf = RfConfig()
self._avg = AvgConfig()
self._trig = TriggerConfig()
# the envelope multiplication is enabled if and only if this member is not None
self._envelope = None
self._shf_sample_rate = _SHF_SAMPLE_RATE
self._result = []
def run(self) -> t.Dict[str, t.Any]:
"""
Perform a sweep with the specified settings.
WARNING: During the sweep the following nodes are subscribed and the sync
command is used to clear all buffers on the data server before the measurement:
/{dev}/qachannels/{rf.channel}/spectroscopy/result/acquired
/{dev}/qachannels/{rf.channel}/spectroscopy/result/data/wave
Returns:
A dictionary with measurement data of the sweep
"""
self._init_sweep()
maybe_data = self._run_freq_sweep()
return self.get_result(data=maybe_data)
def get_result(self, data=None) -> t.Dict[str, t.Any]:
"""Get the result of the sweep.
Args:
data (dict): optional data. If not provided, the data is fetched
from the device.
Returns:
A dictionary with measurement data of the last sweep.
"""
if data is None:
data = self._get_result_logger_data()
data["vector"] = self._result
if not self._sweep.use_sequencer:
data["vector"] = self._average_samples(data["vector"])
props = data["properties"]
props["centerfreq"] = self._rf.center_freq
props["startfreq"] = self._sweep.start_freq
props["stopfreq"] = self._sweep.stop_freq
props["numpoints"] = self._sweep.num_points
props["mapping"] = self._sweep.mapping
return data
def plot(self, input_impedance_ohm: float = 50.0):
"""
Plots power over frequency for last sweep
"""
import matplotlib.pyplot as plt
freq = self.get_offset_freq_vector()
freq_mhz = freq / 1e6
data = self.get_result()
if self._sweep.psd:
y_data = 10 * np.log10(np.real(data["vector"]) * 1e3 / input_impedance_ohm)
y_label = "power spectral density [dBm / Hz]"
else:
y_data = utils.volt_rms_to_dbm(
data["vector"], input_impedance_ohm=input_impedance_ohm
)
y_label = "power [dBm]"
num_subplots = 1 if self._sweep.psd else 2
fig, axs = plt.subplots(num_subplots, sharex=True)
if num_subplots == 1:
# Note: plt.subplots doesn't return a list of axes when only one subplot is requested
# but in the subsequent code we expect it to be a list - thus we create one here.
axs = [axs]
plt.xlabel("freq [MHz]")
axs[0].plot(freq_mhz, y_data)
axs[0].set(ylabel=y_label)
axs[0].grid()
if not self._sweep.psd:
phase = np.unwrap(np.angle(data["vector"]))
            # Plot the phase only for non-psd measurements
axs[1].plot(freq_mhz, phase)
axs[1].set(ylabel="phase [rad]")
axs[1].grid()
fig.suptitle(f"Sweep with center frequency {self._rf.center_freq / 1e9}GHz")
plt.show()
def set_to_device(self):
"""
Transfer settings to device
"""
# First, make sure that the configuration is still valid. This is needed
# since the users might change their instance of the dataclasses
self._check_config(self._sweep, self._avg, self._rf, self._trig, self._envelope)
# set configuration to device
self._configure_rf_frontends()
if self._is_externally_triggered:
self._configure_external_trigger()
self._configure_envelope()
self._configure_spectroscopy_delay()
self._configure_integration_time()
self._configure_psd()
self._daq.sync()
def configure(
self,
sweep_config: SweepConfig = None,
avg_config: AvgConfig = None,
rf_config: RfConfig = None,
trig_config: TriggerConfig = None,
envelope_config: EnvelopeConfig = None,
):
"""
Configure and check the settings
Arguments:
sweep_config (SweepConfig, optional): @dataclass containing sweep
configuration (None: default configuration applies)
avg_config (AvgConfig, optional): @dataclass with averaging configuration
(None: default configuration applies)
rf_config (RfConfig, optional): @dataclass with RF configuration
(None: default configuration applies)
trig_config (TriggerConfig, optional): @dataclass with trigger
configuration (None: default configuration applies)
envelope_config: (EnvelopeConfig, optional): @dataclass configuring
the envelope for pulse spectroscopy (None: the multiplication with
the envelope is disabled)
"""
self._check_config(
sweep_config, avg_config, rf_config, trig_config, envelope_config
)
self._sweep = sweep_config or self._sweep
self._rf = rf_config or self._rf
self._avg = avg_config or self._avg
self._trig = trig_config or self._trig
# Note: in the case the envelope_config argument is None, the envelope
# multiplication will be disabled. Hence no "or" statement is used here.
self._envelope = envelope_config
def get_configuration(self) -> Config:
"""Get the current configuration.
Returns:
The configuration of the sweeper class as
Config(SweepConfig, AvgConfig, RfConfig, TriggerConfig)
"""
return Config(self._sweep, self._avg, self._rf, self._trig)
def get_offset_freq_vector(self):
"""
Get vector of frequency points
"""
if self._sweep.mapping == _Mapping.LIN.value:
freq_vec = np.linspace(
self._sweep.start_freq, self._sweep.stop_freq, self._sweep.num_points
)
else: # log
start_f_log = np.log10(self._sweep.start_freq + self._rf.center_freq)
stop_f_log = np.log10(self._sweep.stop_freq + self._rf.center_freq)
temp_f_vec = np.logspace(start_f_log, stop_f_log, self._sweep.num_points)
freq_vec = temp_f_vec - self._rf.center_freq
return freq_vec
def _check_config(
self,
sweep_config=None,
avg_config=None,
rf_config=None,
trig_config=None,
envelope_config=None,
):
"""
Checks if the supplied configurations are valid
This function has the same arguments as the public function self.configure()
"""
if rf_config:
_check_channel_index(self._daq, self._dev, rf_config.channel)
_check_center_freq(rf_config.center_freq)
_check_input_range(rf_config.input_range)
_check_output_range(rf_config.output_range)
if sweep_config:
            # Validate the new configuration rather than the currently stored one.
            if (
                sweep_config.use_sequencer
                and sweep_config.mapping != _Mapping.LIN.value
            ):
raise ValueError(
"Only linear sweeps are supported with the sequencer-based approach"
)
_check_in_band_freq(sweep_config.start_freq, sweep_config.stop_freq)
_check_mapping(sweep_config.mapping)
_check_output_gain(sweep_config.oscillator_gain)
_check_settling_time(sweep_config.settling_time)
_check_wait_after_integration(sweep_config.wait_after_integration)
if avg_config:
_check_avg_mode(avg_config.mode)
self._check_integration_time(avg_config.integration_time)
self._check_integration_delay(avg_config.integration_delay)
if trig_config and trig_config.source is not None:
_check_trigger_source(trig_config.source)
if envelope_config:
_check_envelope_waveform(envelope_config.waveform)
self._check_envelope_delay(envelope_config.delay)
@property
def _path_prefix(self) -> str:
return f"/{self._dev}/qachannels/{self._rf.channel}/"
@property
def _acquired_path(self) -> str:
return self._path_prefix + "spectroscopy/result/acquired"
@property
def _spec_enable_path(self) -> str:
return self._path_prefix + "spectroscopy/result/enable"
@property
def _data_path(self) -> str:
return self._path_prefix + "spectroscopy/result/data/wave"
@property
def _is_externally_triggered(self) -> bool:
if self._trig.source is None:
return False
return (
_TriggerSource[self._trig.source.upper()].value
<= _EXTERNAL_TRIGGER_LIMIT.value
)
@property
def _is_sw_triggered(self) -> bool:
if self._trig.source is None:
return False
return (
_TriggerSource[self._trig.source.upper()].value
== _TriggerSource.SOFTWARE_TRIGGER0.value
)
def _configure_rf_frontends(self):
"""
Configures the RF frontend settings to the device
"""
# don't set output/input on/off, keep previous user settings
self._daq.setInt(self._path_prefix + "input/range", self._rf.input_range)
self._daq.setInt(self._path_prefix + "output/range", self._rf.output_range)
self._daq.setDouble(self._path_prefix + "centerfreq", self._rf.center_freq)
self._daq.setDouble(
self._path_prefix + "oscs/0/gain", self._sweep.oscillator_gain
)
self._daq.setString(self._path_prefix + "mode", "spectroscopy")
def _configure_external_trigger(self):
"""
Configures the external trigger inputs to the device
"""
# Note: the following index arithmetic is only valid for HW triggers:
trig_channel = _TriggerSource[self._trig.source.upper()].value // 2
trig_input = _TriggerSource[self._trig.source.upper()].value % 2
trig_path = f"/{self._dev}/qachannels/{trig_channel}/triggers/{trig_input}/"
self._daq.setDouble(trig_path + "level", self._trig.level)
self._daq.setInt(trig_path + "imp50", self._trig.imp50)
def _configure_envelope(self):
"""
Configures the envelope waveform settings for pulsed spectroscopy to the device
"""
path = self._path_prefix + "spectroscopy/envelope"
if self._envelope:
self._daq.setVector(
path + "/wave", self._envelope.waveform.astype("complex128")
)
self._daq.setInt(path + "/enable", 1)
self._daq.setDouble(path + "/delay", self._envelope.delay)
else:
self._daq.setInt(path + "/enable", 0)
def _configure_spectroscopy_delay(self):
"""
Configures the delay for triggering the spectroscopy module to the device
"""
path = self._path_prefix + "spectroscopy/delay"
if self._avg:
self._daq.setDouble(path, self._avg.integration_delay)
def _configure_integration_time(self):
"""
Configure the integration time to the device
"""
spectroscopy_len = round(self._avg.integration_time * self._shf_sample_rate)
self._daq.setInt(self._path_prefix + "spectroscopy/length", spectroscopy_len)
def _configure_psd(self):
"""
Configures the Power Spectral Density feature
"""
enable_value = 1 if self._sweep.psd else 0
self._daq.setInt(self._path_prefix + "spectroscopy/psd/enable", enable_value)
def _get_freq_vec_host(self):
"""
Get the vector of frequencies for the host-based sweep
"""
single_freq_vec = self.get_offset_freq_vector()
return self._concatenate_freq_vecs_host(single_freq_vec)
def _concatenate_freq_vecs_host(self, single_freq_vec):
"""
Concatenates the vector of frequencies depending on the averaging and triggering
type for the host-based sweep
"""
triggered_sequential = (
self._avg.mode.lower() == _AveragingMode.SEQUENTIAL.value
and not self._is_sw_triggered
)
if self._avg.num_averages == 1 or triggered_sequential:
freq_vec = single_freq_vec
elif self._avg.mode == _AveragingMode.CYCLIC.value:
num_concatenate = self._avg.num_averages - 1
freq_vec = single_freq_vec
while num_concatenate > 0:
num_concatenate -= 1
freq_vec = np.concatenate((freq_vec, single_freq_vec), axis=None)
else: # sequential + sw_trigger
freq_vec = np.zeros(self._avg.num_averages * self._sweep.num_points)
for i, f in enumerate(single_freq_vec):
for j in range(self._avg.num_averages):
ind = i * self._avg.num_averages + j
freq_vec[ind] = f
return freq_vec
def _configure_direct_triggering_host(self):
"""
Configures the direct triggering of the spectroscopy module in the host-based
approach
"""
if self._trig.source is None:
raise ValueError(
"Trigger source cannot be None if use_sequencer is set to False in "
"SweepConfig"
)
self._daq.setString(
self._path_prefix + "spectroscopy/trigger/channel",
self._trig.source.lower(),
)
def _configure_triggering_via_sequencer(self):
"""
Configures the triggering of the spectroscopy module via the sequencer
"""
if self._is_sw_triggered and (not self._trig.force_sw_trigger):
raise ValueError(
textwrap.dedent(
"""
Software trigger is not supported if use_sequencer is True!
We recommend to set the trigger source in TriggerConfig to None when
using the sequencer-based sweep, in order to let the sequencer
define the repetition rate of the experiment.
"""
)
)
# the sequencer receives the actual trigger
if self._trig.source is not None:
self._daq.setString(
self._path_prefix + "generator/auxtriggers/0/channel",
self._trig.source.lower(),
)
# the spectroscopy module must use the trigger coming from the sequencer
self._daq.setString(
self._path_prefix + "spectroscopy/trigger/channel",
f"chan{self._rf.channel}seqtrig0",
)
def _init_sweep(self):
"""
Initializes the sweep by configuring all settings to the devices
"""
self.set_to_device()
self._stop_result_logger()
if self._sweep.use_sequencer:
self._configure_triggering_via_sequencer()
sequencer_program = self._generate_sequencer_program()
shfqa.load_sequencer_program(
self._daq, self._dev, self._rf.channel, sequencer_program
)
else:
self._configure_direct_triggering_host()
self._daq.sync()
def _stop_result_logger(self):
"""
Stops the result logger and makes sure it is stopped
"""
if self._daq.syncSetInt(self._spec_enable_path, 0) == 1:
raise RuntimeError("The result logger could not be stopped")
def _issue_single_sw_trigger(self):
self._daq.syncSetInt(f"/{self._dev}/system/swtriggers/0/single", 1)
def _enable_measurement(self):
self._daq.syncSetInt(self._spec_enable_path, 1)
def _get_vector_after_measurement(self):
data = self._get_result_logger_data()
return data["vector"]
def _set_freq_to_device(self, freq: float):
"""
Configures a frequency on the device
Arguments:
freq: the frequency to be configured
"""
self._daq.syncSetDouble(self._path_prefix + "oscs/0/freq", freq)
def _get_freq_sequencer(self, num_acquired: int) -> float:
"""
Infers the frequency from the number of acquired results in a sequencer-based
sweep
Arguments:
num_acquired: the current number of acquired results
Returns:
the inferred frequency
"""
if self._avg.mode == _AveragingMode.CYCLIC.value:
# Cyclic averaging
return self._sweep.start_freq + self._freq_step * (
(num_acquired - 1) % self._sweep.num_points
)
# Sequential averaging
return self._sweep.start_freq + self._freq_step * (
(num_acquired - 1) // self._avg.num_averages
)
def _poll_results(
self, data_path: str, acquired_path: str, expected_num_results: int
):
"""
Repetitively polls for results in sequencer-driven sweeps until the expected
number of results is acquired.
        Raises a TimeoutError exception if no new result is acquired within 10 seconds.
Arguments:
data_path: path to the result data node
Must be subscribed by caller!
acquired_path: path to the "acquired" node, which reports the
current number of acquired results
Must be subscribed by caller!
expected_num_results: expected total number of results
Returns:
the result vector when it becomes available
"""
poll_time = 0.05
result_timeout = 10 # seconds
# avoid too many iterations but print often enough
print_interval = 0.5 # seconds
elapsed_time_since_result = 0
elapsed_time_since_print = print_interval # force print in first iteration
results = 0
result_logger_data = None
while elapsed_time_since_result < result_timeout:
poll_start = time.perf_counter()
poll_results = self._daq.poll(poll_time, timeout_ms=10, flat=True)
poll_duration = time.perf_counter() - poll_start
if acquired_path in poll_results:
results = poll_results[acquired_path]["value"][-1]
elapsed_time_since_result = 0
else:
elapsed_time_since_result += poll_duration
if data_path in poll_results:
result_logger_data = poll_results[data_path][0]
if elapsed_time_since_print >= print_interval:
_print_sweep_progress(
results, expected_num_results, self._get_freq_sequencer(results)
)
elapsed_time_since_print = 0
else:
elapsed_time_since_print += poll_duration
is_done = (results == expected_num_results) and (
result_logger_data is not None
)
if is_done:
# report the final progress
_print_sweep_progress(
results,
expected_num_results,
self._get_freq_sequencer(results),
newline=True,
)
return result_logger_data
if results > 0:
raise TimeoutError(
f"failed to get a new result in {result_timeout} seconds, so far "
f"only got {results}!"
)
raise TimeoutError(f"failed to get any result in {result_timeout} seconds!")
def _wait_for_results_host(self, freq, num_results):
"""
Waits for the results in the host-based sweep
Arguments:
freq: the current frequency (only needed for the status printouts)
num_results: the desired number of results to wait for
"""
poll_time = 0.05
result_timeout = 10 # seconds
# avoid too many iterations but print often enough
print_interval = 0.5 # seconds
elapsed_time_since_result = 0
elapsed_time_since_print = print_interval # force print in first iteration
results = 0
while elapsed_time_since_result < result_timeout:
poll_start = time.perf_counter()
poll_results = self._daq.poll(poll_time, timeout_ms=10, flat=True)
poll_duration = time.perf_counter() - poll_start
if self._acquired_path in poll_results:
results = poll_results[self._acquired_path]["value"][-1]
elapsed_time_since_result = 0
else:
elapsed_time_since_result += poll_duration
if elapsed_time_since_print >= print_interval:
_print_sweep_progress(results, num_results, freq)
elapsed_time_since_print = 0
else:
elapsed_time_since_print += poll_duration
if results == num_results:
# we are done - but we must report the final progress
_print_sweep_progress(results, num_results, freq)
utils.wait_for_state_change(
self._daq, self._spec_enable_path, 0, timeout=1
)
return
if results > 0:
raise TimeoutError(
f"failed to get a new result in {result_timeout} seconds, so far "
f"only got {results}!"
)
raise TimeoutError(f"failed to get any result in {result_timeout} seconds!")
def _wait_for_results_host_sw_trig(self, expected_results, wait_time=1):
"""
Waits for the results in the host-based sweep using the software trigger
Arguments:
expected_results: the expected number of results
wait_time: the expected maximal time to wait for the results
"""
# leave margin for the swtrigger and the dataserver to be updated
wait_time = 1.2 * (wait_time + 0.3)
# iterate often (20ms) to improve performance
utils.wait_for_state_change(
self._daq,
self._acquired_path,
expected_results,
timeout=wait_time,
sleep_time=0.02,
)
def _run_freq_sweep(self) -> t.Optional[t.Dict[str, t.Any]]:
"""
Runs the frequency sweep.
Dispatches between the different sweep approaches.
"""
if self._sweep.use_sequencer:
return self._run_freq_sweep_sequencer()
if self._is_sw_triggered:
return self._run_freq_sweep_host_sw_trig()
return self._run_freq_sweep_host()
def _run_freq_sweep_sequencer(self) -> t.Dict[str, t.Any]:
"""
Runs the frequency sweep with the sequencer-based approach.
"""
self._print_sweep_details()
num_results = self._configure_result_length_and_averages_sequencer()
_subscribe_with_assert(self._daq, self._data_path)
_subscribe_with_assert(self._daq, self._acquired_path)
self._daq.sync()
self._enable_measurement()
self._enable_sequencer()
try:
result_logger_data = self._poll_results(
self._data_path, self._acquired_path, num_results
)
self._result = result_logger_data["vector"]
finally:
self._daq.unsubscribe(self._data_path)
self._daq.unsubscribe(self._acquired_path)
return result_logger_data
def _run_freq_sweep_host_sw_trig(self) -> t.Dict[str, t.Any]:
"""
Runs the frequency sweep with the host-based approach using the software trigger
"""
self._print_sweep_details()
freq_vec = self._get_freq_vec_host()
self._configure_result_length_and_averages_host()
self._enable_measurement()
for i, freq in enumerate(freq_vec):
self._set_freq_to_device(freq)
_print_sweep_progress(i + 1, len(freq_vec), freq)
self._issue_single_sw_trigger()
self._wait_for_results_host_sw_trig(
expected_results=i + 1, wait_time=self._avg.integration_time
)
utils.wait_for_state_change(self._daq, self._spec_enable_path, 0, timeout=1.0)
data = self._get_result_logger_data()
self._result = data["vector"]
return data
def _run_freq_sweep_host(self) -> None:
"""
Runs the frequency sweep with the host-based approach (not software-triggered)
"""
self._print_sweep_details()
freq_vec = self._get_freq_vec_host()
num_results = self._configure_result_length_and_averages_host()
self._result = []
_subscribe_with_assert(self._daq, self._acquired_path)
self._daq.sync()
for freq in freq_vec:
self._set_freq_to_device(freq)
self._enable_measurement()
try:
self._wait_for_results_host(freq, num_results)
except Exception as wait_exception:
# make sure we also unsubscribe from the node in case of an exception
self._daq.unsubscribe(self._acquired_path)
raise wait_exception
self._result = np.append(self._result, self._get_vector_after_measurement())
# after the sweep has finished, we unsubscribe from the node
self._daq.unsubscribe(self._acquired_path)
@property
def actual_settling_time(self) -> float:
"""Wait time between setting new frequency and triggering of integration.
Note: the granularity of this time is 16 samples (8 ns).
"""
return _round_for_playzero(
self._sweep.settling_time,
sample_rate=self._shf_sample_rate,
)
@property
def actual_hold_off_time(self) -> float:
"""Wait time after triggering the integration unit until the next cycle.
Note: the granularity of this time is 16 samples (8 ns).
"""
# ensure safe hold-off time for the integration results to be written to the external RAM.
min_hold_off_time = 1032e-9
return _round_for_playzero(
max(
min_hold_off_time,
self._avg.integration_delay
+ self._avg.integration_time
+ self._sweep.wait_after_integration,
),
sample_rate=self._shf_sample_rate,
)
@property
def predicted_cycle_time(self) -> float:
"""Predicted duration of each cycle of the spectroscopy loop.
Note: this property only applies in self-triggered mode, which is active
when the trigger source is set to None and `use_sequencer` is True.
"""
return self.actual_settling_time + self.actual_hold_off_time
def _get_playzero_hold_off_samples(self) -> int:
"""
        Returns the hold-off time needed per iteration of the inner-most
loop of the SeqC program. The return value respects the minimal hold-off time
and the granularity of the playZero SeqC command.
Returns:
the number of samples corresponding to the hold-off time
"""
return round(self.actual_hold_off_time * self._shf_sample_rate)
def _get_playzero_settling_samples(self) -> int:
"""
Returns an integer number of samples corresponding to the settling time
The return value respects the granularity of the playZero SeqC command.
Returns:
the number of samples corresponding to the settling time
"""
return round(self.actual_settling_time * self._shf_sample_rate)
@property
def _freq_step(self) -> float:
"""
Returns the frequency step size according to the sweep settings
"""
return (self._sweep.stop_freq - self._sweep.start_freq) / (
self._sweep.num_points - 1
)
def _generate_sequencer_program(self):
"""
Internal method, which generates the SeqC code for a sweep
"""
seqc_header = textwrap.dedent(
f"""
const OSC0 = 0;
setTrigger(0);
configFreqSweep(OSC0, {self._sweep.start_freq}, {self._freq_step});
"""
)
seqc_wait_for_trigger = (
"waitDigTrigger(1);"
if self._trig.source is not None
else "// self-triggering mode"
)
seqc_loop_body = textwrap.dedent(
f"""
{seqc_wait_for_trigger}
// define time from setting the oscillator frequency to sending
// the spectroscopy trigger
playZero({self._get_playzero_settling_samples()});
// set the oscillator frequency depending on the loop variable i
setSweepStep(OSC0, i);
resetOscPhase();
// define time to the next iteration
playZero({self._get_playzero_hold_off_samples()});
// trigger the integration unit and pulsed playback in pulsed mode
setTrigger(1);
setTrigger(0);
"""
)
averaging_loop_arguments = f"var j = 0; j < {self._avg.num_averages}; j++"
sweep_loop_arguments = f"var i = 0; i < {self._sweep.num_points}; i++"
if self._avg.mode == _AveragingMode.CYCLIC.value:
outer_loop_arguments = averaging_loop_arguments
inner_loop_arguments = sweep_loop_arguments
else:
outer_loop_arguments = sweep_loop_arguments
inner_loop_arguments = averaging_loop_arguments
seqc = (
seqc_header
+ textwrap.dedent(
f"""
for({outer_loop_arguments}) {{
for({inner_loop_arguments}) {{"""
)
+ textwrap.indent(seqc_loop_body, " " * 8)
+ textwrap.dedent(
"""
}
}
"""
)
)
return seqc
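    # For illustration only: with hypothetical settings (start_freq=0,
    # _freq_step=1e6, 3 sweep points, 2 averages, cyclic mode), the method
    # above would emit SeqC of roughly this shape:
    #
    #   const OSC0 = 0;
    #   setTrigger(0);
    #   configFreqSweep(OSC0, 0, 1000000.0);
    #   for(var j = 0; j < 2; j++) {
    #       for(var i = 0; i < 3; i++) {
    #           // settling, setSweepStep(OSC0, i), hold-off, trigger pulse
    #       }
    #   }
    #
    # In sequential mode the two loop headers swap, so each frequency point
    # is measured num_averages times in a row.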
def _enable_sequencer(self):
"""
Starts the sequencer for the sequencer-based sweep
"""
self._daq.setInt(self._path_prefix + "generator/single", 1)
self._daq.syncSetInt(self._path_prefix + "generator/enable", 1)
hundred_milliseconds = 0.1
time.sleep(hundred_milliseconds)
def _print_sweep_details(self):
detail_str = (
f"Run a sweep with {self._sweep.num_points} frequency points in the range of "
f"[{self._sweep.start_freq / 1e6}, {self._sweep.stop_freq / 1e6}] MHz + "
f"{self._rf.center_freq / 1e9} GHz. \n"
f"Mapping is {self._sweep.mapping}. \n"
f"Integration time = {self._avg.integration_time} sec. \n"
f"Measures {self._avg.num_averages} times per frequency point. \n"
f"Averaging mode is {self._avg.mode}.\n"
)
if self._trig.source is not None:
detail_str += f"Trigger source is {self._trig.source.lower()}."
else:
            detail_str += (
                "Trigger source is set to None, which means the sequencer "
                "defines the repetition rate."
            )
print(detail_str)
def _configure_result_length_and_averages_host(self) -> int:
"""
Configures the result vector length and number of averages for the host-based
sweep to the device
Returns:
the configured number of results
"""
if self._is_sw_triggered:
num_results = self._sweep.num_points * self._avg.num_averages
elif self._avg.mode.lower() == _AveragingMode.SEQUENTIAL.value:
num_results = self._avg.num_averages
else:
num_results = 1
self._daq.setInt(self._path_prefix + "spectroscopy/result/length", num_results)
# for the host-based approach, we always average in software, thus set the
# hardware averages to 1
self._daq.setInt(self._path_prefix + "spectroscopy/result/averages", 1)
return num_results
def _configure_result_length_and_averages_sequencer(self) -> int:
"""
Configures the result vector length and number of averages for the
sequencer-based sweep to the device
Returns:
the expected total number of results, which is the product of the result
vector length and number of averages
"""
self._daq.setString(
self._path_prefix + "spectroscopy/result/mode", self._avg.mode
)
self._daq.setInt(
self._path_prefix + "spectroscopy/result/length", self._sweep.num_points
)
self._daq.setInt(
self._path_prefix + "spectroscopy/result/averages", self._avg.num_averages
)
return self._sweep.num_points * self._avg.num_averages
def _get_result_logger_data(self):
result_path = self._path_prefix + "spectroscopy/result/data/wave"
data = self._daq.get(result_path, flat=True)
return data[result_path.lower()][0]
def _average_samples(self, vec):
if self._avg.num_averages == 1:
return vec
avg_vec = np.zeros(self._sweep.num_points, dtype="complex")
if self._avg.mode == _AveragingMode.CYCLIC.value:
total_measurements = self._sweep.num_points * self._avg.num_averages
for i in range(self._sweep.num_points):
avg_range = range(i, total_measurements, self._sweep.num_points)
avg_vec[i] = np.mean(vec[avg_range])
else: # sequential
for i in range(self._sweep.num_points):
start_ind = i * self._avg.num_averages
avg_range = range(start_ind, start_ind + self._avg.num_averages)
avg_vec[i] = np.mean(vec[avg_range])
return avg_vec
def _check_integration_time(self, integration_time_s):
max_int_len = ((2**23) - 1) * 4
min_int_len = 4
max_integration_time = max_int_len / self._shf_sample_rate
min_integration_time = min_int_len / self._shf_sample_rate
if integration_time_s < min_integration_time:
raise ValueError(
f"Integration time below minimum of {min_integration_time}s."
)
if integration_time_s > max_integration_time:
raise ValueError(
f"Integration time exceeds maximum of {max_integration_time}s."
)
def _check_delay(self, resolution_ns, min_s, max_s, val_s):
if val_s > max_s or val_s < min_s:
raise ValueError(f"Delay out of bounds! {min_s} <= delay <= {max_s}")
val_ns = val_s * 1e9
val_ns_modulo = val_ns % resolution_ns
if not math.isclose(val_ns_modulo, 0.0, abs_tol=1e-3):
raise ValueError(
f"Delay {val_ns} ns not in multiples of {resolution_ns} ns."
)
def _check_integration_delay(self, integration_delay_s):
resolution_ns = 2
max_s = 131e-6
self._check_delay(resolution_ns, 0, max_s, integration_delay_s)
def _check_envelope_delay(self, delay_s):
resolution_ns = 2
max_s = 131e-6
self._check_delay(resolution_ns, 0, max_s, delay_s) | zhinst-utils | /zhinst_utils-0.3.3-py3-none-any.whl/zhinst/utils/shf_sweeper.py | shf_sweeper.py |
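    # A minimal sketch of the granularity check above, assuming a hypothetical
    # `sweeper` instance; 224 ns is a multiple of the 2 ns resolution and
    # passes, while 225 ns raises a ValueError:
    #
    #   sweeper._check_delay(resolution_ns=2, min_s=0, max_s=131e-6, val_s=224e-9)
    #   sweeper._check_delay(resolution_ns=2, min_s=0, max_s=131e-6, val_s=225e-9)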
import numpy as np
from zhinst.core import AwgModule, ziDAQServer
from zhinst.utils import shfqa, shfsg
def max_qubits_per_qa_channel(daq: ziDAQServer, device_id: str) -> int:
"""Returns the maximum number of supported qubits per channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
"""
return shfqa.max_qubits_per_channel(daq, device_id)
def load_sequencer_program(
daq: ziDAQServer,
device_id: str,
channel_index: int,
sequencer_program: str,
*,
channel_type: str,
awg_module: AwgModule = None,
timeout: float = 10,
) -> None:
"""Compiles and loads a program to a specified sequencer.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying to which sequencer the program below is
uploaded - there is one sequencer per channel. (Always 0 for the
qa channel)
sequencer_program: Sequencer program to be uploaded.
channel_type: Identifier specifying if the sequencer from the qa or sg
channel should be used. ("qa" or "sg")
        awg_module: The standalone AWG compiler is used instead.
            .. deprecated:: 22.08
timeout: Maximum time to wait for the compilation on the device in
seconds.
"""
if channel_type == "qa":
return shfqa.load_sequencer_program(
daq,
device_id,
0,
sequencer_program,
awg_module=awg_module,
timeout=timeout,
)
if channel_type == "sg":
return shfsg.load_sequencer_program(
daq,
device_id,
channel_index,
sequencer_program,
awg_module=awg_module,
timeout=timeout,
)
    raise ValueError(
        f'channel_type was set to {channel_type} but only "qa" and "sg" '
        "are allowed"
    )
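# A minimal usage sketch, assuming a running Data Server and the hypothetical
# device id "dev12004"; the one-line SeqC program is a placeholder:
#
#   from zhinst.core import ziDAQServer
#   daq = ziDAQServer("localhost", 8004, 6)
#   daq.connectDevice("dev12004", "1gbe")
#   load_sequencer_program(
#       daq, "dev12004", 0, "repeat(4) { playZero(4096); }", channel_type="qa"
#   )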
def enable_sequencer(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
single: int,
channel_type: str,
) -> None:
"""Starts the sequencer of a specific channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which sequencer to enable - there is one
sequencer per channel. (Always 0 for the qa channel)
single: 1 - Disable sequencer after finishing execution.
0 - Restart sequencer after finishing execution.
channel_type: Identifier specifying if the sequencer from the qa or sg
channel should be used. ("qa" or "sg")
"""
if channel_type == "qa":
return shfqa.enable_sequencer(
daq,
device_id,
0,
single=single,
)
if channel_type == "sg":
return shfsg.enable_sequencer(
daq,
device_id,
channel_index,
single=single,
)
raise ValueError(
f'channel_type was set to {channel_type} but only "qa" and "sg" ' "are allowed"
)
def write_to_waveform_memory(
daq: ziDAQServer,
device_id: str,
channel_index: int,
waveforms: dict,
*,
channel_type: str,
clear_existing: bool = True,
) -> None:
"""Writes pulses to the waveform memory of a specified generator.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which sequencer the waveforms below are
written to - there is one generator per channel.
waveforms: Dictionary of waveforms, the key specifies the slot to which
to write the value which is a complex array containing the waveform
samples.
channel_type: Identifier specifying if the waveforms should be uploaded
to the qa or sg channel. ("qa" or "sg")
clear_existing: Specify whether to clear the waveform memory before the
present upload. (Only used when channel_type is "qa"!)
"""
if channel_type == "qa":
return shfqa.write_to_waveform_memory(
daq,
device_id,
channel_index,
waveforms,
clear_existing=clear_existing,
)
if channel_type == "sg":
return shfsg.write_to_waveform_memory(daq, device_id, channel_index, waveforms)
raise ValueError(
f'channel_type was set to {channel_type} but only "qa" and "sg" are allowed'
)
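# Hedged example of the expected `waveforms` layout: keys are integer slots,
# values are complex sample arrays (the flat pulse below is illustrative):
#
#   pulse = 0.5 * np.ones(1024, dtype=complex)
#   write_to_waveform_memory(daq, "dev12004", 0, {0: pulse}, channel_type="qa")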
def configure_scope(
daq: ziDAQServer,
device_id: str,
*,
input_select: dict,
num_samples: int,
trigger_input: str,
num_segments: int = 1,
num_averages: int = 1,
trigger_delay: float = 0.0,
) -> None:
"""Configures the scope for a measurement.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
input_select: Keys (int) map a specific scope channel with a signal
source (str), e.g. "channel0_signal_input". For a list of available
values use daq.help(f"/{device_id}/scopes/0/channels/0/inputselect").
        num_samples: Number of samples in the scope shot.
trigger_input: Specifies the trigger source of the scope acquisition
- if set to None, the self-triggering mode of the scope becomes
active, which is useful e.g. for the GUI. For a list of available
trigger values use daq.help(f"/{device_id}/scopes/0/trigger/channel").
num_segments: Number of distinct scope shots to be returned after ending
the acquisition.
num_averages: Specifies how many times each segment should be averaged
on hardware; to finish a scope acquisition, the number of issued
triggers must be equal to num_segments * num_averages.
trigger_delay: Delay in samples specifying the time between the start of
data acquisition and reception of a trigger.
"""
return shfqa.configure_scope(
daq,
device_id,
input_select=input_select,
num_samples=num_samples,
trigger_input=trigger_input,
num_segments=num_segments,
num_averages=num_averages,
trigger_delay=trigger_delay,
)
def get_scope_data(daq: ziDAQServer, device_id: str, *, timeout: float = 5.0) -> tuple:
"""Queries the scope for data once it is finished.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
timeout: Maximum time to wait for the scope data in seconds.
Returns:
Three-element tuple with:
* recorded_data (array): Contains an array per scope channel with
the recorded data.
* recorded_data_range (array): Full scale range of each scope
channel.
* scope_time (array): Relative acquisition time for each point in
recorded_data in seconds starting from 0.
"""
return shfqa.get_scope_data(daq, device_id, timeout=timeout)
def start_continuous_sw_trigger(
daq: ziDAQServer, device_id: str, *, num_triggers: int, wait_time: float
) -> None:
"""Start a continuous trigger.
Issues a specified number of software triggers with a certain wait time in
between. The function guarantees reception and proper processing of all
triggers by the device, but the time between triggers is non-deterministic
by nature of software triggering.
Warning:
Only use this function for prototyping and/or cases without strong
timing requirements.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
num_triggers: Number of triggers to be issued.
wait_time: Time between triggers in seconds.
"""
return shfqa.start_continuous_sw_trigger(
daq, device_id, num_triggers=num_triggers, wait_time=wait_time
)
def enable_scope(
daq: ziDAQServer, device_id: str, *, single: int, acknowledge_timeout: float = 1.0
) -> None:
"""Resets and enables the scope.
Blocks until the host has received the enable acknowledgment from the device.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
single: 0 = continuous mode, 1 = single-shot.
acknowledge_timeout: Maximum time to wait for diverse acknowledgments
in the implementation.
.. versionadded:: 0.1.1
"""
return shfqa.enable_scope(
daq, device_id, single=single, acknowledge_timeout=acknowledge_timeout
)
def configure_weighted_integration(
daq: ziDAQServer,
device_id: str,
*,
weights: dict,
integration_delay: float = 0.0,
clear_existing: bool = True,
) -> None:
"""Configures the weighted integration on a specified channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
weights: Dictionary containing the complex weight vectors, where keys
correspond to the indices of the integration units to be configured.
integration_delay: Delay in seconds before starting readout.
clear_existing: Specify whether to set all the integration weights to
zero before proceeding with the present upload.
"""
return shfqa.configure_weighted_integration(
daq,
device_id,
0,
weights=weights,
integration_delay=integration_delay,
clear_existing=clear_existing,
)
def configure_result_logger_for_spectroscopy(
daq: ziDAQServer,
device_id: str,
*,
result_length: int,
num_averages: int = 1,
averaging_mode: int = 0,
) -> None:
"""Configures a specified result logger for spectroscopy mode.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
result_length: Number of results to be returned by the result logger
num_averages: Number of averages, will be rounded to 2^n.
averaging_mode: Select the averaging order of the result, with
0 = cyclic and 1 = sequential.
"""
return shfqa.configure_result_logger_for_spectroscopy(
daq,
device_id,
0,
result_length=result_length,
num_averages=num_averages,
averaging_mode=averaging_mode,
)
def configure_result_logger_for_readout(
daq: ziDAQServer,
device_id: str,
*,
result_source: str,
result_length: int,
num_averages: int = 1,
averaging_mode: int = 0,
) -> None:
"""Configures a specified result logger for readout mode.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
result_source: String-based tag to select the result source in readout
mode, e.g. "result_of_integration" or "result_of_discrimination".
result_length: Number of results to be returned by the result logger.
num_averages: Number of averages, will be rounded to 2^n.
averaging_mode: Select the averaging order of the result, with
0 = cyclic and 1 = sequential.
"""
return shfqa.configure_result_logger_for_readout(
daq,
device_id,
0,
result_source=result_source,
result_length=result_length,
num_averages=num_averages,
averaging_mode=averaging_mode,
)
def enable_result_logger(
daq: ziDAQServer, device_id: str, *, mode: str, acknowledge_timeout: float = 1.0
) -> None:
"""Resets and enables a specified result logger.
Blocks until the host has received the enable acknowledgment from the device.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
mode: Select between "spectroscopy" and "readout" mode.
acknowledge_timeout: Maximum time to wait for diverse acknowledgments
in the implementation.
.. versionadded:: 0.1.1
"""
return shfqa.enable_result_logger(
daq, device_id, 0, mode=mode, acknowledge_timeout=acknowledge_timeout
)
def get_result_logger_data(
daq: ziDAQServer,
device_id: str,
*,
mode: str,
timeout: float = 1.0,
) -> np.array:
"""Return the measured data of a specified result logger.
Blocks until the specified result logger is finished.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
mode: Select between "spectroscopy" and "readout" mode.
timeout: Maximum time to wait for data in seconds.
Returns:
Array containing the result logger data.
"""
return shfqa.get_result_logger_data(daq, device_id, 0, mode=mode, timeout=timeout)
def configure_qa_channel(
daq: ziDAQServer,
device_id: str,
*,
input_range: int,
output_range: int,
center_frequency: float,
mode: str,
) -> None:
"""Configures the RF input and output of a specified QA channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
        input_range: Maximal range of the signal input power in dBm.
        output_range: Maximal range of the signal output power in dBm.
center_frequency: Center Frequency of the analysis band.
mode: Select between "spectroscopy" and "readout" mode.
"""
return shfqa.configure_channel(
daq,
device_id,
0,
input_range=input_range,
output_range=output_range,
center_frequency=center_frequency,
mode=mode,
)
def configure_qa_sequencer_triggering(
daq: ziDAQServer,
device_id: str,
*,
aux_trigger: str,
play_pulse_delay: float = 0.0,
) -> None:
"""Configures the triggering of a specified sequencer.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
        aux_trigger: Alias for the trigger used in the sequencer. For a list
            of available values use
            daq.help(f"/{device_id}/qachannels/0/generator/auxtriggers/0/channel")
play_pulse_delay: Delay in seconds before the start of waveform playback.
"""
return shfqa.configure_sequencer_triggering(
daq,
device_id,
0,
aux_trigger=aux_trigger,
play_pulse_delay=play_pulse_delay,
)
def upload_commandtable(
daq: ziDAQServer,
device_id: str,
channel_index: int,
command_table: str,
) -> None:
"""Uploads a command table in the form of a string to the appropriate channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which SG channel to upload the command
table to.
command_table: The command table to be uploaded.
"""
return shfsg.upload_commandtable(daq, device_id, channel_index, command_table)
def configure_marker_and_trigger(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
trigger_in_source: str,
trigger_in_slope: str,
marker_out_source: str,
) -> None:
"""Configures the trigger inputs and marker outputs of a specified AWG core.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
        device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index of the used SG channel.
trigger_in_source: Alias for the trigger input used by the
sequencer. For a list of available values use:
daq.help(f"/{dev_id}/sgchannels/{channel_index}/awg/auxtriggers/0/channel")
trigger_in_slope: Alias for the slope of the input trigger used
by sequencer. For a list of available values use
daq.help(f"/{dev_id}/sgchannels/{channel_index}/awg/auxtriggers/0/slope")
marker_out_source: Alias for the marker output source used by the
sequencer. For a list of available values use
daq.help(f"/{dev_id}/sgchannels/{channel_index}/marker/source")
"""
return shfsg.configure_marker_and_trigger(
daq,
device_id,
channel_index,
trigger_in_source=trigger_in_source,
trigger_in_slope=trigger_in_slope,
marker_out_source=marker_out_source,
)
def configure_sg_channel(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
enable: int,
output_range: int,
center_frequency: float,
rflf_path: int,
) -> None:
"""Configures the RF input and output of a specified SG channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index of the used SG channel.
enable: Whether or not to enable the channel.
        output_range: Maximal range of the signal output power in dBm.
center_frequency: Center Frequency before modulation.
rflf_path: Switch between RF and LF paths.
"""
return shfsg.configure_channel(
daq,
device_id,
channel_index,
enable=enable,
output_range=output_range,
center_frequency=center_frequency,
rflf_path=rflf_path,
)
def configure_pulse_modulation(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
enable: int,
osc_index: int = 0,
osc_frequency: float = 100e6,
phase: float = 0.0,
global_amp: float = 0.5,
gains: tuple = (1.0, -1.0, 1.0, 1.0),
sine_generator_index: int = 0,
) -> None:
"""Configure the pulse modulation.
Configures the sine generator to digitally modulate the AWG output, for
generating single sideband AWG signals.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
        device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index of the used SG channel.
enable: Enables modulation.
osc_index: Selects which oscillator to use.
osc_frequency: Oscillator frequency used to modulate the AWG
outputs. (default = 100e6)
phase: Sets the oscillator phase. (default = 0.0)
global_amp: Global scale factor for the AWG outputs. (default = 0.5)
gains: Sets the four amplitudes used for single sideband
            generation. Default values correspond to the upper sideband with a
positive oscillator frequency. (default = (1.0, -1.0, 1.0, 1.0))
sine_generator_index: Selects which sine generator to use on a given
channel.
"""
return shfsg.configure_pulse_modulation(
daq,
device_id,
channel_index,
enable=enable,
osc_index=osc_index,
osc_frequency=osc_frequency,
phase=phase,
global_amp=global_amp,
gains=gains,
sine_generator_index=sine_generator_index,
)
def configure_sine_generation(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
enable: int,
osc_index: int = 0,
osc_frequency: float = 100e6,
phase: float = 0.0,
gains: tuple = (0.0, 1.0, 1.0, 0.0),
sine_generator_index: int = 0,
) -> None:
"""Configures the sine generator output of a specified SG channel.
Configures the sine generator output of a specified channel for generating
continuous wave signals without the AWG.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQC device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index of the used SG channel.
enable: Enables the sine generator output.
osc_index: Selects which oscillator to use.
osc_frequency: Oscillator frequency used by the sine generator.
(default = 100e6)
phase: Sets the oscillator phase. (default = 0.0)
        gains: Sets the four amplitudes used for single sideband
            generation. Default values correspond to the upper sideband with a
            positive oscillator frequency. The gains are set in this order:
I/sin, I/cos, Q/sin, Q/cos
(default = (0.0, 1.0, 1.0, 0.0))
sine_generator_index: Selects which sine generator to use on a given
channel.
"""
return shfsg.configure_sine_generation(
daq,
device_id,
channel_index,
enable=enable,
osc_index=osc_index,
osc_frequency=osc_frequency,
phase=phase,
gains=gains,
sine_generator_index=sine_generator_index,
) | zhinst-utils | /zhinst_utils-0.3.3-py3-none-any.whl/zhinst/utils/shfqc/shfqc.py | shfqc.py |
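# Sketch of a continuous-wave output on SG channel 0; the device id and all
# settings below are illustrative assumptions, not recommended values:
#
#   configure_sg_channel(daq, "dev12004", 0, enable=1, output_range=0,
#                        center_frequency=1e9, rflf_path=1)
#   configure_sine_generation(daq, "dev12004", 0, enable=1, osc_index=0,
#                             osc_frequency=100e6)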
import time
import numpy as np
from zhinst.utils.utils import wait_for_state_change
from zhinst.core import ziDAQServer, compile_seqc
SHFQA_MAX_SIGNAL_GENERATOR_WAVEFORM_LENGTH = 4 * 2**10
SHFQA_MAX_SIGNAL_GENERATOR_CARRIER_COUNT = 16
SHFQA_SAMPLING_FREQUENCY = 2e9
def max_qubits_per_channel(daq: ziDAQServer, device_id: str) -> int:
"""Returns the maximum number of supported qubits per channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
"""
return len(daq.listNodes(f"/{device_id}/qachannels/0/readout/integration/weights"))
def load_sequencer_program(
daq: ziDAQServer,
device_id: str,
channel_index: int,
sequencer_program: str,
**_,
) -> None:
"""Compiles and loads a program to a specified sequencer.
This function is composed of 4 steps:
1. Reset the generator to ensure a clean state.
2. Compile the sequencer program with the offline compiler.
3. Upload the compiled binary elf file.
4. Validate that the upload was successful and the generator is ready
again.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying to which sequencer the program below is
uploaded - there is one sequencer per channel.
sequencer_program: Sequencer program to be uploaded.
Raises:
RuntimeError: If the Upload was not successfully or the device could not
process the sequencer program.
"""
# Start by resetting the sequencer.
daq.syncSetInt(
f"/{device_id}/qachannels/{channel_index}/generator/reset",
1,
)
# Compile the sequencer program.
device_type = daq.getString(f"/{device_id}/features/devtype")
device_options = daq.getString(f"/{device_id}/features/options")
elf, _ = compile_seqc(
sequencer_program, device_type, device_options, channel_index, sequencer="qa"
)
# Upload the binary elf file to the device.
daq.setVector(f"/{device_id}/qachannels/{channel_index}/generator/elf/data", elf)
# Validate that the upload was successful and the generator is ready again.
if not daq.get(
f"/{device_id}/qachannels/{channel_index}/generator/ready",
):
raise RuntimeError(
"The device did not not switch to into the ready state after the upload."
)
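# Minimal usage sketch with a placeholder SeqC program (device id assumed);
# compilation errors would surface as exceptions from compile_seqc:
#
#   seqc = "repeat(4) { playZero(4096); }"
#   load_sequencer_program(daq, "dev12004", 0, seqc)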
def configure_scope(
daq: ziDAQServer,
device_id: str,
*,
input_select: dict,
num_samples: int,
trigger_input: str,
num_segments: int = 1,
num_averages: int = 1,
trigger_delay: float = 0.0,
) -> None:
"""Configures the scope for a measurement.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
input_select: Keys (int) map a specific scope channel with a signal
source (str), e.g. "channel0_signal_input". For a list of available
values use daq.help(f"/{device_id}/scopes/0/channels/0/inputselect").
num_samples: Number of samples in the scope shot.
trigger_input: Specifies the trigger source of the scope acquisition
- if set to None, the self-triggering mode of the scope becomes
active, which is useful e.g. for the GUI. For a list of available
trigger values use daq.help(f"/{device_id}/scopes/0/trigger/channel").
num_segments: Number of distinct scope shots to be returned after ending
the acquisition.
num_averages: Specifies how many times each segment should be averaged
on hardware; to finish a scope acquisition, the number of issued
triggers must be equal to num_segments * num_averages.
trigger_delay: Delay in samples specifying the time between the start of
data acquisition and reception of a trigger.
"""
scope_path = f"/{device_id}/scopes/0/"
settings = []
settings.append((scope_path + "segments/count", num_segments))
if num_segments > 1:
settings.append((scope_path + "segments/enable", 1))
else:
settings.append((scope_path + "segments/enable", 0))
if num_averages > 1:
settings.append((scope_path + "averaging/enable", 1))
else:
settings.append((scope_path + "averaging/enable", 0))
settings.append((scope_path + "averaging/count", num_averages))
settings.append((scope_path + "channels/*/enable", 0))
for channel, selected_input in input_select.items():
settings.append(
(scope_path + f"channels/{channel}/inputselect", selected_input)
)
settings.append((scope_path + f"channels/{channel}/enable", 1))
settings.append((scope_path + "trigger/delay", trigger_delay))
if trigger_input is not None:
settings.append((scope_path + "trigger/channel", trigger_input))
settings.append((scope_path + "trigger/enable", 1))
else:
settings.append((scope_path + "trigger/enable", 0))
settings.append((scope_path + "length", num_samples))
daq.set(settings)
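# Illustrative scope setup recording ~1 us from the channel 0 signal input;
# the trigger alias below is an assumption, valid values are device-specific:
#
#   configure_scope(
#       daq, "dev12004",
#       input_select={0: "channel0_signal_input"},
#       num_samples=2048,  # 2048 samples / 2 GSa/s ~= 1.024 us
#       trigger_input="software_trigger0",
#   )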
def get_scope_data(daq: ziDAQServer, device_id: str, *, timeout: float = 5.0) -> tuple:
"""Queries the scope for data once it is finished.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
timeout: Maximum time to wait for the scope data in seconds.
Returns:
Three-element tuple with:
* recorded_data (array): Contains an array per scope channel with
the recorded data.
* recorded_data_range (array): Full scale range of each scope
channel.
* scope_time (array): Relative acquisition time for each point in
recorded_data in seconds starting from 0.
"""
# wait until scope has been triggered
wait_for_state_change(daq, f"/{device_id}/scopes/0/enable", 0, timeout=timeout)
# read and post-process the recorded data
recorded_data = [[], [], [], []]
recorded_data_range = [0.0, 0.0, 0.0, 0.0]
num_bits_of_adc = 14
max_adc_range = 2 ** (num_bits_of_adc - 1)
channels = range(4)
for channel in channels:
if daq.getInt(f"/{device_id}/scopes/0/channels/{channel}/enable"):
path = f"/{device_id}/scopes/0/channels/{channel}/wave"
data = daq.get(path.lower(), flat=True)
vector = data[path]
recorded_data[channel] = vector[0]["vector"]
averagecount = vector[0]["properties"]["averagecount"]
scaling = vector[0]["properties"]["scaling"]
voltage_per_lsb = scaling * averagecount
recorded_data_range[channel] = voltage_per_lsb * max_adc_range
# generate the time base
scope_time = [[], [], [], []]
decimation_rate = 2 ** daq.getInt(f"/{device_id}/scopes/0/time")
sampling_rate = SHFQA_SAMPLING_FREQUENCY / decimation_rate # [Hz]
for channel in channels:
scope_time[channel] = (
np.array(range(0, len(recorded_data[channel]))) / sampling_rate
)
return recorded_data, recorded_data_range, scope_time
def enable_sequencer(
daq: ziDAQServer, device_id: str, channel_index: int, *, single: int
) -> None:
"""Starts the sequencer of a specific channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which sequencer to enable - there is one
sequencer per channel.
single: 1 - Disable sequencer after finishing execution.
0 - Restart sequencer after finishing execution.
"""
generator_path = f"/{device_id}/qachannels/{channel_index}/generator/"
daq.setInt(
generator_path + "single",
single,
)
daq.syncSetInt(generator_path + "enable", 1)
hundred_milliseconds = 0.1
time.sleep(hundred_milliseconds)
def write_to_waveform_memory(
daq: ziDAQServer,
device_id: str,
channel_index: int,
waveforms: dict,
*,
clear_existing: bool = True,
) -> None:
"""Writes pulses to the waveform memory of a specified generator.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which generator the waveforms below are
written to - there is one generator per channel.
waveforms: Dictionary of waveforms, the key specifies the slot to which
to write the value which is a complex array containing the waveform
samples.
clear_existing: Specify whether to clear the waveform memory before the
present upload.
"""
generator_path = f"/{device_id}/qachannels/{channel_index}/generator/"
if clear_existing:
daq.syncSetInt(generator_path + "clearwave", 1)
settings = []
for slot, waveform in waveforms.items():
settings.append((generator_path + f"waveforms/{slot}/wave", waveform))
daq.set(settings)
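# Hedged example writing a complex Gaussian pulse to slot 0; shape and
# amplitude are made up for illustration:
#
#   n = np.arange(SHFQA_MAX_SIGNAL_GENERATOR_WAVEFORM_LENGTH)
#   pulse = 0.5 * np.exp(-0.5 * ((n - n.mean()) / (len(n) / 8)) ** 2)
#   write_to_waveform_memory(daq, "dev12004", 0, {0: pulse.astype(complex)})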
def start_continuous_sw_trigger(
daq: ziDAQServer, device_id: str, *, num_triggers: int, wait_time: float
) -> None:
"""Issues a specified number of software triggers.
Issues a specified number of software triggers with a certain wait time in
between. The function guarantees reception and proper processing of all
triggers by the device, but the time between triggers is non-deterministic
by nature of software triggering.
Warning:
Only use this function for prototyping and/or cases without strong
timing requirements.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
num_triggers: Number of triggers to be issued.
wait_time: Time between triggers in seconds.
"""
min_wait_time = 0.02
wait_time = max(min_wait_time, wait_time)
for _ in range(num_triggers):
# syncSetInt() is a blocking call with non-deterministic execution time that
# imposes a minimum time between two software triggers.
daq.syncSetInt(f"/{device_id}/system/swtriggers/0/single", 1)
time.sleep(wait_time)
def enable_scope(
daq: ziDAQServer, device_id: str, *, single: int, acknowledge_timeout: float = 1.0
) -> None:
"""Resets and enables the scope.
Blocks until the host has received the enable acknowledgment from the
device.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
single: 0 = continuous mode, 1 = single-shot.
acknowledge_timeout: Maximum time to wait for diverse acknowledgments
in the implementation.
.. versionadded:: 0.1.1
"""
daq.setInt(f"/{device_id}/scopes/0/single", single)
path = f"/{device_id}/scopes/0/enable"
if daq.getInt(path) == 1 and daq.syncSetInt(path, 0) != 0:
raise RuntimeError(
f"Failed to disable the scope for device {device_id} before enabling it."
)
if daq.syncSetInt(path, 1) != 1:
raise RuntimeError(f"The scope for device {device_id} could not be enabled")
def configure_weighted_integration(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
weights: dict,
integration_delay: float = 0.0,
clear_existing: bool = True,
) -> None:
"""Configures the weighted integration on a specified channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which group of integration units the
integration weights should be uploaded to - each channel is
associated with a number of integration units that depend on
available device options. Please refer to the SHFQA manual for more
details.
weights: Dictionary containing the complex weight vectors, where keys
correspond to the indices of the integration units to be configured.
integration_delay: Delay in seconds before starting readout.
clear_existing: Specify whether to set all the integration weights to
zero before proceeding with the present upload.
"""
assert len(weights) > 0, "'weights' cannot be empty."
integration_path = f"/{device_id}/qachannels/{channel_index}/readout/integration/"
if clear_existing:
daq.syncSetInt(integration_path + "clearweight", 1)
settings = []
for integration_unit, weight in weights.items():
settings.append((integration_path + f"weights/{integration_unit}/wave", weight))
integration_length = len(next(iter(weights.values())))
settings.append((integration_path + "length", integration_length))
settings.append((integration_path + "delay", integration_delay))
daq.set(settings)
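# Sketch with constant unit weights of length 1024 on integration units 0 and
# 1; in practice the weights are derived from measured reference traces:
#
#   w = np.ones(1024, dtype=complex)
#   configure_weighted_integration(
#       daq, "dev12004", 0, weights={0: w, 1: w}, integration_delay=200e-9
#   )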
def configure_result_logger_for_spectroscopy(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
result_length: int,
num_averages: int = 1,
averaging_mode: int = 0,
) -> None:
"""Configures a specified result logger for spectroscopy mode.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which result logger to configure - there
is one result logger per channel.
result_length: Number of results to be returned by the result logger
num_averages: Number of averages, will be rounded to 2^n.
averaging_mode: Select the averaging order of the result, with
0 = cyclic and 1 = sequential.
"""
result_path = f"/{device_id}/qachannels/{channel_index}/spectroscopy/result/"
settings = []
settings.append((result_path + "length", result_length))
settings.append((result_path + "averages", num_averages))
settings.append((result_path + "mode", averaging_mode))
daq.set(settings)
def configure_result_logger_for_readout(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
result_source: str,
result_length: int,
num_averages: int = 1,
averaging_mode: int = 0,
) -> None:
"""Configures a specified result logger for readout mode.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which result logger to configure - there
is one result logger per channel.
result_source: String-based tag to select the result source in readout
mode, e.g. "result_of_integration" or "result_of_discrimination".
result_length: Number of results to be returned by the result logger.
num_averages: Number of averages, will be rounded to 2^n.
averaging_mode: Select the averaging order of the result, with
0 = cyclic and 1 = sequential.
"""
result_path = f"/{device_id}/qachannels/{channel_index}/readout/result/"
settings = []
settings.append((result_path + "length", result_length))
settings.append((result_path + "averages", num_averages))
settings.append((result_path + "source", result_source))
settings.append((result_path + "mode", averaging_mode))
daq.set(settings)
def enable_result_logger(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
mode: str,
acknowledge_timeout: float = 1.0,
) -> None:
"""Resets and enables a specified result logger.
Blocks until the host has received the enable acknowledgment from the
device.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which result logger to enable - there is
one result logger per channel.
mode: Select between "spectroscopy" and "readout" mode.
acknowledge_timeout: Maximum time to wait for diverse acknowledgments in
the implementation.
.. versionadded:: 0.1.1
"""
enable_path = f"/{device_id}/qachannels/{channel_index}/{mode}/result/enable"
# reset the result logger if some old measurement is still running
if daq.getInt(enable_path) == 1 and daq.syncSetInt(enable_path, 0) != 0:
raise RuntimeError(f"Failed to disable the result logger for {mode} mode.")
# enable the result logger
if daq.syncSetInt(enable_path, 1) != 1:
raise RuntimeError(
f"Failed to enable the result logger for {mode} mode. "
f"Please make sure that the QA channel mode is set to {mode}."
)
def get_result_logger_data(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
mode: str,
timeout: float = 1.0,
) -> np.array:
"""Return the measured data of a specified result logger.
Blocks until the specified result logger is finished.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which result logger to query results
from - there is one result logger per channel.
mode: Select between "spectroscopy" and "readout" mode.
timeout: Maximum time to wait for data in seconds.
Returns:
Array containing the result logger data.
"""
try:
wait_for_state_change(
daq,
f"/{device_id}/qachannels/{channel_index}/{mode}/result/enable",
0,
timeout=timeout,
)
except TimeoutError as error:
raise TimeoutError(
"The result logger is still running. "
"This usually indicates that it did not receive the expected number of "
"triggers."
) from error
data = daq.get(
f"/{device_id}/qachannels/{channel_index}/{mode}/result/data/*/wave",
flat=True,
)
result = np.array([d[0]["vector"] for d in data.values()])
return result
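# Typical readout flow, sketched with hypothetical parameters; the sequencer
# (or software triggers) must supply exactly result_length triggers:
#
#   configure_result_logger_for_readout(
#       daq, "dev12004", 0,
#       result_source="result_of_integration", result_length=100,
#   )
#   enable_result_logger(daq, "dev12004", 0, mode="readout")
#   # ... run the sequencer / issue 100 triggers ...
#   results = get_result_logger_data(daq, "dev12004", 0, mode="readout")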
def configure_channel(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
input_range: int,
output_range: int,
center_frequency: float,
mode: str,
) -> None:
"""Configures the RF input and output of a specified channel.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying which channel to configure.
        input_range: Maximal range of the signal input power in dBm.
        output_range: Maximal range of the signal output power in dBm.
center_frequency: Center Frequency of the analysis band.
mode: Select between "spectroscopy" and "readout" mode.
"""
path = f"/{device_id}/qachannels/{channel_index}/"
settings = []
settings.append((path + "input/range", input_range))
settings.append((path + "output/range", output_range))
settings.append((path + "centerfreq", center_frequency))
settings.append((path + "mode", mode))
daq.set(settings)
def configure_sequencer_triggering(
daq: ziDAQServer,
device_id: str,
channel_index: int,
*,
aux_trigger: str,
play_pulse_delay: float = 0.0,
) -> None:
"""Configures the triggering of a specified sequencer.
Args:
daq: Instance of a Zurich Instruments API session connected to a Data
Server. The device with identifier device_id is assumed to already
be connected to this instance.
device_id: SHFQA device identifier, e.g. `dev12004` or 'shf-dev12004'.
channel_index: Index specifying on which sequencer to configure the
triggering - there is one sequencer per channel.
        aux_trigger: Alias for the trigger used in the sequencer. For a list
            of available values use
            daq.help(f"/{device_id}/qachannels/0/generator/auxtriggers/0/channel")
play_pulse_delay: Delay in seconds before the start of waveform playback.
"""
daq.setString(
f"/{device_id}/qachannels/{channel_index}/generator/auxtriggers/0/channel",
aux_trigger,
)
daq.setDouble(
f"/{device_id}/qachannels/{channel_index}/generator/delay",
play_pulse_delay,
) | zhinst-utils | /zhinst_utils-0.3.3-py3-none-any.whl/zhinst/utils/shfqa/shfqa.py | shfqa.py |
import typing as t
from dataclasses import dataclass
from enum import IntEnum
import itertools
import numpy as np
import zhinst.utils.shfqa as shfqa_utils
from zhinst.core import ziDAQServer
DEVICE_MIN_STATES = 2
DEVICE_MAX_STATES = 4
DEVICE_MAX_INTEGRATION_LEN = 4096
@dataclass
class QuditState:
"""Qudit state with associated reference trace.
Args:
index: A unique identifier of the state.
label: The label of the state.
        ref_trace: The reference trace of the state. It is typically obtained
            from an averaged scope measurement of the qudit's response to a
            readout pulse when this state has been prepared.
"""
index: int
label: str
ref_trace: np.ndarray
class IntegrationWeight:
"""Represents integration weights vectors for one-vs-one classification.
Differential weight vectors are defined as the complex conjugate of the
    difference between reference traces of two states. They are used for weighted
integration in the multi-state discrimination units.
Args:
state_left: The state corresponding to the reference trace used as the
left side of the subtraction operator.
state_right: The state corresponding to the reference trace used as the
right side of the subtraction operator.
"""
def __init__(self, state_left: QuditState, state_right: QuditState):
self._left_state = state_left
self._right_state = state_right
self._vector = np.conj(state_left.ref_trace - state_right.ref_trace)
self.center_threshold_ref()
@property
def left_state(self) -> QuditState:
"""The state corresponding to the left side of the subtraction."""
return self._left_state
@property
def right_state(self) -> QuditState:
"""The state corresponding to the right side of the subtraction."""
return self._right_state
@property
def vector(self) -> np.ndarray:
"""The vector of integration weights."""
return self._vector
@property
def threshold(self) -> float:
"""Get the threshold value used together with this weight."""
return self._threshold
@threshold.setter
def threshold(self, value: float) -> None:
"""Sets the threshold value used together with this weight."""
self._threshold = value
def scale(self, factor: float) -> None:
"""Scales the weight vector with a factor.
Args:
factor: Factor to scale the weight vector with.
"""
self._vector *= factor
def __array__(self) -> np.ndarray:
return self._vector
def center_threshold(self, trace1: np.ndarray, trace2: np.ndarray) -> None:
"""Center the threshold value between trace1 and trace2.
        This function computes the weighted integration results using trace1
        and trace2 as input and sets the threshold to the arithmetic mean of
        the two results.
        Args:
            trace1: The first trace.
            trace2: The second trace.
"""
res1 = np.real(weighted_integration(self._vector, trace1))
res2 = np.real(weighted_integration(self._vector, trace2))
self._threshold = (res1 + res2) / 2
def center_threshold_ref(self) -> None:
"""Center the threshold value between the left and right reference traces."""
self.center_threshold(self.left_state.ref_trace, self.right_state.ref_trace)
class QuditSettings:
"""Collection of qudit settings for multistate discrimination.
Qudit settings are the integration weights, thresholds, and the assignment
vector for qudit state discrimination. These settings are initialized based
on reference traces for each state, which need to be provided as input to the
constructor of this class.
Args:
ref_traces: List of (complex-valued) reference traces,
one vector per state. The reference traces are typically obtained by an
averaged scope measurement of the readout resonator response when the
qudit is prepared in a certain state.
"""
def __init__(self, ref_traces: t.List[np.ndarray]):
# Number of states equals number of reference traces
self._num_states = len(ref_traces)
# First, make sure that all reference traces have an equal length
first_ref_len = len(ref_traces[0])
        for ref_traces_idx, ref_trace in enumerate(ref_traces[1:], start=1):
assert len(ref_trace) == first_ref_len, (
f"The length {len(ref_trace)} of ref_traces[{ref_traces_idx}] "
+ f"differs from the length of ref_traces[0]: {first_ref_len}."
)
self._states = {}
for state_idx, ref_trace in enumerate(ref_traces):
self._states[state_idx] = QuditState(state_idx, str(state_idx), ref_trace)
self._weights = []
for state_left, state_right in itertools.combinations(self._states.values(), 2):
self._weights.append(IntegrationWeight(state_left, state_right))
self.normalize_weights()
# re-center thresholds to the reference traces
self.reset_thresholds_to_center()
self._assignment_vec = self.calc_theoretical_assignment_vec()
@property
def num_states(self) -> int:
"""Number of states (d) of the qudit."""
return self._num_states
@property
def states(self) -> t.Dict[int, QuditState]:
"""Dictionary of states of the qudit.
The dictionary keys are state indices and the values are State objects.
"""
return self._states
@property
def weights(self) -> t.List[IntegrationWeight]:
"""List of weight vectors for one-vs-one classification."""
return self._weights
@property
def integration_len(self) -> int:
"""Length of the weight vectors as number of samples."""
return len(self._weights[0].vector)
@property
def thresholds(self) -> t.List[float]:
"""Threshold values, one per weight vector, for one-vs-one classification."""
return [weight.threshold for weight in self._weights]
@property
def assignment_vec(self) -> np.ndarray:
"""Vector assigning state indices for each threshold comparison outcome.
The vector has 2**( d * (d - 1) / 2 ) elements, where d is the number
of states of the qudit.
"""
return self._assignment_vec
def reset_thresholds_to_center(self) -> None:
"""Resets the thresholds of each weight to the center.
The thresholds get centered between the results of the weighted
integration using the reference traces of the corresponding pairs of
states.
"""
for weight in self._weights:
weight.center_threshold_ref()
def normalize_weights(self) -> None:
"""Scales all weight vectors with a common factor.
The common factor is chosen such that maximum absolute weight value is 1.
"""
max_abs_weight = max([np.abs(weight.vector).max() for weight in self._weights])
factor = 1 / max_abs_weight
for weight in self._weights:
weight.scale(factor)
def calc_theoretical_assignment_vec(self) -> np.ndarray:
"""Calculates the theoretical assignment vector.
The theoretical assignment vector is determined by the majority vote
(winner takes all) principle.
"""
assignment_len = 2 ** len(self._weights)
assignment_vec = np.zeros(assignment_len, dtype=int)
for assignment_idx in range(assignment_len):
state_counts = np.zeros(self._num_states, dtype=int)
for weight_idx, weight in enumerate(self._weights):
above_threshold = (assignment_idx & (2**weight_idx)) != 0
state_idx = (
weight.left_state.index
if above_threshold
else weight.right_state.index
)
state_counts[state_idx] += 1
winner_state = np.argmax(state_counts)
assignment_vec[assignment_idx] = winner_state
return assignment_vec
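# A worked example of the mapping above (illustration only, not part of the
# original module): for d = 3 states the pairwise weights are ordered
# (0 vs 1), (0 vs 2), (1 vs 2), giving 2**3 = 8 comparison outcomes. Bit k of
# `assignment_idx` is set when the integration result of weight k lies above
# its threshold, which votes for the *left* state of pair k:
#
#     assignment_idx = 0b011 -> pair (0, 1) -> 0, pair (0, 2) -> 0,
#                               pair (1, 2) -> 2
#     votes: state 0 twice, state 2 once -> assignment_vec[0b011] == 0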
def _get_base_path(dev: str, qa_channel: int) -> str:
"""Gets the base node tree path of the multistate discrimination feature.
Args:
dev: The device id.
qa_channel: The QA channel index.
Returns:
The path to the multistate node tree branch.
"""
return f"/{dev}/qachannels/{qa_channel}/readout/multistate"
def _get_qudit_base_path(dev: str, qa_channel: int, qudit_idx: int) -> str:
"""Gets the base node tree path of a particular qudit.
Args:
dev: The device id
qa_channel: The QA channel index
qudit_idx: The index of the qudit
Returns:
The path to the qudit node tree branch.
"""
return _get_base_path(dev, qa_channel) + f"/qudits/{qudit_idx}"
def get_settings_transaction(
dev: str,
qa_channel: int,
qudit_idx: int,
qudit_settings: QuditSettings,
*,
enable: bool = True,
) -> t.List[t.Tuple[str, t.Any]]:
"""Compiles a list of settings to apply to the device.
Args:
dev: The device id.
qa_channel: The index of the QA channel
qudit_idx: The index of the qudit to be configured
qudit_settings: The qudit settings to be configured
enable: Whether to enable the qudit (default: True)
Returns:
List of settings defining a transaction. Each list entry is a tuple,
where the first entry specifies the node path and the second entry the
value to be configured to the respective node.
"""
# Make sure the number of states is feasible for the device
    assert DEVICE_MIN_STATES <= qudit_settings.num_states <= DEVICE_MAX_STATES, (
        "The number of states must be in the range "
        f"[{DEVICE_MIN_STATES}, {DEVICE_MAX_STATES}] (inclusive)."
    )
# Make sure the integration length is feasible for the device
assert qudit_settings.integration_len <= DEVICE_MAX_INTEGRATION_LEN, (
f"Too long integration length {qudit_settings.integration_len}. "
f"It must be less than or equal to {DEVICE_MAX_INTEGRATION_LEN}."
)
qudit_base_path = _get_qudit_base_path(dev, qa_channel, qudit_idx)
transaction = []
transaction.append((qudit_base_path + "/numstates", qudit_settings.num_states))
transaction.append((qudit_base_path + "/enable", 1 if enable else 0))
transaction.append(
(
f"/{dev}/qachannels/{qa_channel}/readout/integration/length",
qudit_settings.integration_len,
)
)
# NOTE: Upload only the first d - 1 differential weight vectors.
# The remaining pairwise difference of results will be computed in
# real time on the device in order to save hardware resources
for weight_idx, weight in enumerate(
qudit_settings.weights[: qudit_settings.num_states - 1]
):
transaction.append(
(
qudit_base_path + f"/weights/{weight_idx}/wave",
weight.vector,
)
)
threshold_base = qudit_base_path + "/thresholds"
for threshold_idx, threshold in enumerate(qudit_settings.thresholds):
transaction.append((threshold_base + f"/{threshold_idx}/value", threshold))
transaction.append(
(qudit_base_path + "/assignmentvec", qudit_settings.assignment_vec)
)
return transaction
def config_to_device(
daq: ziDAQServer,
dev: str,
qa_channel: int,
qudit_idx: int,
qudit_settings: QuditSettings,
*,
enable: bool = True,
) -> None:
"""Configures the qudit settings to the device.
Args:
daq: An instance of core.ziDAQServer
dev: The device id.
qa_channel: The index of the QA channel
qudit_idx: The index of the qudit to be configured
qudit_settings: The qudit settings to be configured
enable: Whether to enable the qudit (default: True)
"""
transaction = get_settings_transaction(
dev,
qa_channel=qa_channel,
qudit_idx=qudit_idx,
qudit_settings=qudit_settings,
enable=enable,
)
daq.set(transaction)
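# A minimal usage sketch (illustration only, not part of the original module).
# The device id "dev12000" is a placeholder and the acquisition of the
# averaged reference traces is elided; `ziDAQServer` is the regular LabOne
# Python API connection.
#
#     from zhinst.core import ziDAQServer
#
#     daq = ziDAQServer("localhost", 8004, 6)
#     ref_traces = [...]  # one averaged complex trace per prepared state
#     settings = QuditSettings(ref_traces)
#     config_to_device(daq, "dev12000", qa_channel=0, qudit_idx=0,
#                      qudit_settings=settings)
#     # ... run the readout, then fetch the per-qudit results:
#     results = get_qudits_results(daq, "dev12000", qa_channel=0)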
class _ReslogSource(IntEnum):
"""Values for the result/source node."""
RESULT_OF_INTEGRATION = 1
RESULT_OF_DISCRIMINATION = 3
def get_qudits_results(
daq: ziDAQServer, dev: str, qa_channel: int
) -> t.Dict[int, np.ndarray]:
"""Downloads the qudit results from the device and group them by qudit.
Depending on the result logger source, this function accesses the multistate
nodes to determine which integrators were used for which qudit to be able to
group the results by qudit.
Args:
daq: An instance of the core.ziDAQServer class
dev: The device id.
        qa_channel: The index of the QA channel
Returns:
A dictionary with the qudit index keys and result vector values.
"""
results = shfqa_utils.get_result_logger_data(daq, dev, qa_channel, mode="readout")
result_source = daq.getInt(f"/{dev}/qachannels/{qa_channel}/readout/result/source")
base_path = _get_base_path(dev, qa_channel)
qudits_results = {}
max_num_qudits = len(daq.listNodes(base_path + "/qudits/*/enable"))
for qudit_idx in range(max_num_qudits):
qudit_base_path = _get_qudit_base_path(dev, qa_channel, qudit_idx)
enable_node_value = daq.getInt(qudit_base_path + "/enable")
is_enabled = enable_node_value != 0
if not is_enabled:
continue
if result_source == _ReslogSource.RESULT_OF_INTEGRATION:
start_idx_node = qudit_base_path + "/integrator/indexvec"
integrator_indices = daq.get(start_idx_node, flat=True)[start_idx_node][0][
"vector"
]
qudits_results[qudit_idx] = results[integrator_indices]
elif result_source == _ReslogSource.RESULT_OF_DISCRIMINATION:
qudits_results[qudit_idx] = results[qudit_idx].astype(int)
else:
raise ValueError(f"Unkown result logger source: {result_source}")
return qudits_results
def weighted_integration(weight_vec: np.ndarray, signal: np.ndarray) -> float:
"""Computes the weighted integration.
Args:
weight_vec: Vector of integration weights
signal: Vector of input signal samples
Returns:
The result of the weighted integration.
"""
return np.dot(weight_vec, signal)
def compare_threshold(threshold: float, integration_result: float) -> bool:
"""Compares an integration result with a threshold.
Args:
threshold: The threshold value
integration_result: The integration result for the comparison
Returns:
True if the integration_result is greater than the threshold,
False otherwise.
"""
    return integration_result > threshold
| zhinst-utils | /zhinst_utils-0.3.3-py3-none-any.whl/zhinst/utils/shfqa/multistate.py | multistate.py
# Zurich Instruments LabOne Packages

The `zhinst` package is purely a metapackage that installs the whole Python API
stack for LabOne®, the Zurich Instruments control software.
It includes the following packages:
* [zhinst-core](https://pypi.org/project/zhinst-core/) (native Python API for LabOne)
* [zhinst-utils](https://pypi.org/project/zhinst-utils/) (utility functions for zhinst-core)
* [zhinst-toolkit](https://pypi.org/project/zhinst-toolkit/) (high-level Python API for LabOne)
This package includes everything required to interface with LabOne from within Python.
For more information see the dedicated package documentation or the
[online documentation](https://docs.zhinst.com).
WARNING: Upgrading from version <= 22.02 to 22.08 requires uninstalling this package first and then reinstalling it; using `--upgrade` will corrupt the package.
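A clean reinstall could, for example, look like this (assuming a standard `pip` setup):
```bash
pip uninstall zhinst
pip install zhinst
```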
| zhinst | /zhinst-23.6.2.tar.gz/zhinst-23.6.2/README.md | README.md |
# ZhiQiang, 之强
zhiqiang (之强) means "to become strong", similar to ziqiang (自强), "self-strengthening".
A platform for reinforcement learning. The framework does not depend on any specific deep learning platform, but the implemented concrete agents are written with PyTorch.
## Examples
Learning curves of different agents on the GridWorld environment:
<img src="https://github.com/Li-Ming-Fan/zhiqiang/blob/master/aaa_store/learning_curriculum.png" width="50%" height="50%" alt="learning_curriculum">
A replay of a trained EntropyACV agent for GridWorld:
<img src="https://github.com/Li-Ming-Fan/zhiqiang/blob/master/aaa_store/a_replay_gif.gif" width="30%" height="30%" alt="gridworld_replay_gif">
## Description
Abstract classes that form the framework:
```
from zhiqiang.agents import AbstractPQNet
from zhiqiang.agents import AbstractAgent
from zhiqiang.envs import AbstractEnv
from zhiqiang.replay_buffers import AbstractBuffer
from zhiqiang.trainers import AbstractTrainer
```
Please run commands such as
```
AbstractPQNet.print_info()
AbstractAgent.print_info()
```
to see the functions required for implementing concrete classes. A rough sketch of a concrete environment is given below.
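As an illustration only (the method names below are assumptions, not the package's verified interface; run the `print_info()` commands above to see the actual requirements), a concrete environment might be sketched like this:
```
from zhiqiang.envs import AbstractEnv

class MyGridEnv(AbstractEnv):
    """A hypothetical environment sketch."""
    def __init__(self, settings):
        self.settings = settings
    def reset(self):
        """Assumed name: return the initial observation."""
        ...
    def step(self, action):
        """Assumed name: return (observation, reward, done, info)."""
        ...
```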
Implemented Trainers and Buffers:
```
from zhiqiang.trainers.simple_trainer import SimpleTrainer as Trainer
from zhiqiang.trainers.paral_trainer import ParalTrainer as Trainer
from zhiqiang.replay_buffers.simple_buffer import SimpleBuffer as Buffer
from zhiqiang.replay_buffers.priority_buffer import PriorityBuffer as Buffer
```
Some of the implemented agents:
```
from zhiqiang.agents.dqn_vanila import VanilaDQN as Agent
from zhiqiang.agents.dqn_double import DoubleDQN as Agent
from zhiqiang.agents.dqn_mstep import MStepDQN as Agent
from zhiqiang.agents.dqn_priority import PriorityDQN as Agent
```
The full package layout:
```
.
├── __init__.py
├── agents
│ ├── __init__.py
│ ├── acq_entropy.py
│ ├── acv_entropy.py
│ ├── dqn_double.py
│ ├── dqn_mstep.py
│ ├── dqn_priority.py
│ ├── dqn_vanila.py
│ └── policy_mstep.py
├── envs
│ └── __init__.py
├── replay_buffers
│ ├── __init__.py
│ ├── priority_buffer.py
│ └── simple_buffer.py
├── trainers
│ ├── __init__.py
│ ├── paral_trainer.py
│ └── simple_trainer.py
└── utils
├── __init__.py
├── basic_settings.py
├── data_parallelism.py
├── log_parser.py
├── torch_utils.py
└── uct_simple.py
```
## Quick Trial
For a quick trial, try the code in the file examples/GridWorld/script_train_simple.py:
```
# define an env
from grid_world import GridWorld as Env
# define a qnet, in PyTorch
from gridworld_qnet import GridWorldQNet as QNet
# pick an agent
from zhiqiang.agents.dqn_vanila import VanilaDQN as Agent
# from zhiqiang.agents.dqn_double import DoubleDQN as Agent
# from zhiqiang.agents.dqn_mstep import MStepDQN as Agent
# from zhiqiang.agents.dqn_priority import PriorityDQN as Agent
# pick a buffer
from zhiqiang.replay_buffers.simple_buffer import SimpleBuffer as Buffer
# from zhiqiang.replay_buffers.priority_buffer import PriorityBuffer as Buffer
# pick a trainer
from zhiqiang.trainers.simple_trainer import SimpleTrainer as Trainer
# from zhiqiang.trainers.paral_trainer import ParalTrainer as Trainer
# settings file, make sure the path is right
settings_filepath = "./data_root/settings/settings_gridworld.json"
agent_name = "agentname"
env_name = "GridWorld"
##
#
from zhiqiang.utils.basic_settings import BasicSettings
#
settings = BasicSettings(settings_filepath)
settings.env = env_name
settings.agent = agent_name
settings.check_settings()
settings.display()
#
# device
import torch
settings.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \
if settings.device_type is None else torch.device(settings.device_type)
#
print("device: {}".format(settings.device))
#
# trainer
trainer = Trainer(settings, Agent, {"qnet": QNet}, Env, Buffer)
#
# train
list_aver_rewards = trainer.do_train()
#
# draw
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 5))
#
eval_period = settings.trainer_settings["eval_period"]
list_x = [idx * eval_period for idx in range(len(list_aver_rewards))]
#
print(list_x)
print(list_aver_rewards)
#
plt.plot(list_x, list_aver_rewards, label="Averaged Rewards", color="r", linewidth=2)
plt.xlabel("Number Boost")
plt.ylabel("Averaged Rewards") # plt.title("Boost Curriculum")
# plt.xticks(list_x) # plt.legend()
plt.grid()
plt.show()
```
To use more agents, see the code in the file examples/GridWorld/script_train_all.py.
## Philosophy
This package does not aim to encompass all kinds of reinforcement learning algorithms, but to provide a framework for building RL solutions to tasks.
An RL solution always involves an environment, an agent (or agents) and some neural networks (as agent modules). For training the agent (or agents), a trainer and a replay buffer are further required. If the interface functions among these parts are well defined, the different parts become easy to swap, plug-and-play style. This is what this package aims to do.
In this package, a set of interface functions is defined, and some simple implementations of the different parts are provided. We hope these will pave the way for users to make their own customized definitions and implementations.
## Installation
From PyPI distribution system:
```
pip install zhiqiang
```
This package is tested with PyTorch 1.4.0.
## Usage
For usage examples of this package, please see:
1. examples/GridWorld
2. examples/Atari
## Citation
If you find ZhiQiang helpful, please cite it in your publications.
```
@software{zhiqiang,
author = {Ming-Fan Li},
title = {ZhiQiang, a platform for reinforcement learning},
year = {2020},
url = {https://github.com/Li-Ming-Fan/zhiqiang}
}
```
| zhiqiang | /zhiqiang-0.1.1.tar.gz/zhiqiang-0.1.1/README.md | README.md |
from aiohttp import web
from loguru import logger
from uuid import uuid4
from zhishuyun_scaffold.settings import HTTP_HOST, HTTP_PORT
from rest_framework.exceptions import APIException as RestApiException
from rest_framework.exceptions import NotFound
from zhishuyun_scaffold.settings import ERROR_CODE_API_ERROR, \
ERROR_DETAIL_API_ERROR, ERROR_DETAIL_NOT_FOUND
class BaseController(object):
def __init__(self):
self.id = str(uuid4())
self.app = web.Application()
self.handler_class_map = {}
self.register_routes()
async def health(self, request):
# return 200 if server is healthy
logger.debug('health check')
return web.Response(text='ok', status=200)
def get_handler_class_index(self, handler_class):
return f'{handler_class.method}-{handler_class.path}'
async def handle(self, request, trace_id=None):
trace_id = trace_id or str(uuid4())
logger.debug(f'[{trace_id}] start to handle request')
handler_class_index = f'{request.method}-{request.path}'
        logger.debug(f'[{trace_id}] handler class index is {handler_class_index}')
        logger.debug(f'[{trace_id}] handler_class_map is {self.handler_class_map}')
response = None
try:
handler_class = self.handler_class_map.get(handler_class_index)
logger.debug(f'[{trace_id}] handler class is {handler_class}')
if not handler_class:
raise NotFound(ERROR_DETAIL_NOT_FOUND)
handler = handler_class(request, trace_id)
response = await handler.handle()
return response
except RestApiException as ex:
            logger.error(f'[{trace_id}] got rest api exception {ex}')
return web.json_response(
{
'detail': ex.detail,
'code': ex.default_code
}, status=ex.status_code)
except Exception:
            logger.exception(f'[{trace_id}] got general exception')
response = web.json_response({
'detail': ERROR_DETAIL_API_ERROR,
'code': ERROR_CODE_API_ERROR
}, status=500)
return response
def register_routes(self):
logger.debug('add router for health')
self.app.router.add_route('GET', '/health', self.health)
self.app.router.add_route('*', '/{path:.*}', self.handle)
def register_handlers(self, handler_classes):
for handler_class in handler_classes:
handler_class_index = self.get_handler_class_index(handler_class)
self.handler_class_map[handler_class_index] = handler_class
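    # A hypothetical handler sketch (the interface is inferred from this class
    # only): a handler class exposes `method` and `path` class attributes,
    # takes (request, trace_id) in its constructor, and implements an async
    # `handle()` that returns an aiohttp response:
    #
    #     class EchoHandler:
    #         method = 'POST'
    #         path = '/echo'
    #
    #         def __init__(self, request, trace_id):
    #             self.request, self.trace_id = request, trace_id
    #
    #         async def handle(self):
    #             return web.json_response({'trace_id': self.trace_id})
    #
    #     controller = BaseController()
    #     controller.register_handlers([EchoHandler])
    #     controller.start()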
def start(self):
        web.run_app(self.app, host=HTTP_HOST, port=HTTP_PORT)
| zhishuyun-scaffold | /zhishuyun-scaffold-0.0.1.tar.gz/zhishuyun-scaffold-0.0.1/zhishuyun_scaffold/controllers/base.py | base.py
# zhixuewang-python
## Installation
### Install with pip (recommended)
```bash
pip install zhixuewang
```
### Install from source
After downloading the project source, run the following in the project root:
```bash
python setup.py install
```
Or directly:
```bash
git clone https://github.com/anwenhu/zhixuewang
cd zhixuewang
python setup.py install
```
## Quick start
### Login
Once you have installed zhixuewang, you can fetch your latest exam marks and print them to the screen like this:
```python
from zhixuewang import login
username = input("你的用户名:").strip()
password = input("你的密码:").strip()
zxw = login(username, password)
print(zxw.get_self_mark())
```
After entering your Zhixue.com username and password, the screen will display something like:
```
name 语文:
分数: 105
name 数学:
分数: 120
name 英语:
分数: 132
name 物理:
分数: 68
name 化学:
分数: 52
name 政治:
分数: 49
name 历史:
分数: 59
name 总分:
分数: 585
```
Here `name` will be replaced by your own name.
If you want to query the marks of a specific exam, e.g. "某中学第二次月考" (a school's second monthly exam), you can do this (assuming you have already run the code above):
```python
print(zxw.get_self_mark("某中学第二次月考"))
```
(Note: since Zhixue.com removed some of its endpoints, features such as querying the class average and class maximum marks are temporarily unavailable.)
For more details, see the [Wiki](https://zhixuewang-python.readthedocs.io/zh_CN/latest/) (the Wiki is still under construction; if you would like to help maintain it, add me on QQ: 1223009522 with the note "github wiki").
## Questions and suggestions
If you run into any problems while using the package, feel free to open an [Issue](https://github.com/anwenhu/zhixuewang/issues).
You can also join this QQ group for discussion: 862767072.
| zhixuewang | /zhixuewang-1.0.2.tar.gz/zhixuewang-1.0.2/README.md | README.md |
!function(a,b){"object"==typeof exports&&"undefined"!=typeof module?b(exports):"function"==typeof define&&define.amd?define(["exports"],b):b(a.RSVP=a.RSVP||{})}(this,function(a){"use strict";function b(a,b){for(var c=0,d=a.length;c<d;c++)if(a[c]===b)return c;return-1}function c(a){var b=a._promiseCallbacks;return b||(b=a._promiseCallbacks={}),b}function d(a,b){if(2!==arguments.length)return wa[a];wa[a]=b}function e(a){var b=typeof a;return null!==a&&("object"===b||"function"===b)}function f(a){return"function"==typeof a}function g(a){return null!==a&&"object"==typeof a}function h(a){return null!==a&&"object"==typeof a}function i(){setTimeout(function(){for(var a=0;a<Aa.length;a++){var b=Aa[a],c=b.payload;c.guid=c.key+c.id,c.childGuid=c.key+c.childId,c.error&&(c.stack=c.error.stack),wa.trigger(b.name,b.payload)}Aa.length=0},50)}function j(a,b,c){1===Aa.push({name:a,payload:{key:b._guidKey,id:b._id,eventName:a,detail:b._result,childId:c&&c._id,label:b._label,timeStamp:za(),error:wa["instrument-with-stack"]?new Error(b._label):null}})&&i()}function k(a,b){var c=this;if(a&&"object"==typeof a&&a.constructor===c)return a;var d=new c(m,b);return s(d,a),d}function l(){return new TypeError("A promises callback cannot return that same promise.")}function m(){}function n(a){try{return a.then}catch(a){return Ea.error=a,Ea}}function o(a,b,c,d){try{a.call(b,c,d)}catch(a){return a}}function p(a,b,c){wa.async(function(a){var d=!1,e=o(c,b,function(c){d||(d=!0,b!==c?s(a,c,void 0):u(a,c))},function(b){d||(d=!0,v(a,b))},"Settle: "+(a._label||" unknown promise"));!d&&e&&(d=!0,v(a,e))},a)}function q(a,b){b._state===Ca?u(a,b._result):b._state===Da?(b._onError=null,v(a,b._result)):w(b,void 0,function(c){b!==c?s(a,c,void 0):u(a,c)},function(b){return v(a,b)})}function r(a,b,c){b.constructor===a.constructor&&c===C&&a.constructor.resolve===k?q(a,b):c===Ea?(v(a,Ea.error),Ea.error=null):f(c)?p(a,b,c):u(a,b)}function s(a,b){a===b?u(a,b):e(b)?r(a,b,n(b)):u(a,b)}function t(a){a._onError&&a._onError(a._result),x(a)}function u(a,b){a._state===Ba&&(a._result=b,a._state=Ca,0===a._subscribers.length?wa.instrument&&j("fulfilled",a):wa.async(x,a))}function v(a,b){a._state===Ba&&(a._state=Da,a._result=b,wa.async(t,a))}function w(a,b,c,d){var e=a._subscribers,f=e.length;a._onError=null,e[f]=b,e[f+Ca]=c,e[f+Da]=d,0===f&&a._state&&wa.async(x,a)}function x(a){var b=a._subscribers,c=a._state;if(wa.instrument&&j(c===Ca?"fulfilled":"rejected",a),0!==b.length){for(var d=void 0,e=void 0,f=a._result,g=0;g<b.length;g+=3)d=b[g],e=b[g+c],d?A(c,d,e,f):e(f);a._subscribers.length=0}}function y(){this.error=null}function z(a,b){try{return a(b)}catch(a){return Fa.error=a,Fa}}function A(a,b,c,d){var e=f(c),g=void 0,h=void 0;if(e){if((g=z(c,d))===Fa)h=g.error,g.error=null;else if(g===b)return void v(b,l())}else g=d;b._state!==Ba||(e&&void 0===h?s(b,g):void 0!==h?v(b,h):a===Ca?u(b,g):a===Da&&v(b,g))}function B(a,b){var c=!1;try{b(function(b){c||(c=!0,s(a,b))},function(b){c||(c=!0,v(a,b))})}catch(b){v(a,b)}}function C(a,b,c){var d=this,e=d._state;if(e===Ca&&!a||e===Da&&!b)return wa.instrument&&j("chained",d,d),d;d._onError=null;var f=new d.constructor(m,c),g=d._result;if(wa.instrument&&j("chained",d,f),e===Ba)w(d,f,a,b);else{var h=e===Ca?a:b;wa.async(function(){return A(e,f,h,g)})}return f}function D(a,b,c){return a===Ca?{state:"fulfilled",value:c}:{state:"rejected",reason:c}}function E(a,b){return ya(a)?new Ga(this,a,!0,b).promise:this.reject(new TypeError("Promise.all must be called with an array"),b)}function F(a,b){var c=this,d=new 
c(m,b);if(!ya(a))return v(d,new TypeError("Promise.race must be called with an array")),d;for(var e=0;d._state===Ba&&e<a.length;e++)w(c.resolve(a[e]),void 0,function(a){return s(d,a)},function(a){return v(d,a)});return d}function G(a,b){var c=this,d=new c(m,b);return v(d,a),d}function H(){throw new TypeError("You must pass a resolver function as the first argument to the promise constructor")}function I(){throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.")}function J(){this.value=void 0}function K(a){try{return a.then}catch(a){return Ka.value=a,Ka}}function L(a,b,c){try{a.apply(b,c)}catch(a){return Ka.value=a,Ka}}function M(a,b){for(var c={},d=a.length,e=new Array(d),f=0;f<d;f++)e[f]=a[f];for(var g=0;g<b.length;g++){c[b[g]]=e[g+1]}return c}function N(a){for(var b=a.length,c=new Array(b-1),d=1;d<b;d++)c[d-1]=a[d];return c}function O(a,b){return{then:function(c,d){return a.call(b,c,d)}}}function P(a,b){var c=function(){for(var c=this,d=arguments.length,e=new Array(d+1),f=!1,g=0;g<d;++g){var h=arguments[g];if(!f){if((f=S(h))===La){var i=new Ja(m);return v(i,La.value),i}f&&!0!==f&&(h=O(f,h))}e[g]=h}var j=new Ja(m);return e[d]=function(a,c){a?v(j,a):void 0===b?s(j,c):!0===b?s(j,N(arguments)):ya(b)?s(j,M(arguments,b)):s(j,c)},f?R(j,e,a,c):Q(j,e,a,c)};return c.__proto__=a,c}function Q(a,b,c,d){var e=L(c,d,b);return e===Ka&&v(a,e.value),a}function R(a,b,c,d){return Ja.all(b).then(function(b){var e=L(c,d,b);return e===Ka&&v(a,e.value),a})}function S(a){return!(!a||"object"!=typeof a)&&(a.constructor===Ja||K(a))}function T(a,b){return Ja.all(a,b)}function U(a,b){if(!a)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!b||"object"!=typeof b&&"function"!=typeof b?a:b}function V(a,b){if("function"!=typeof b&&null!==b)throw new TypeError("Super expression must either be null or a function, not "+typeof b);a.prototype=Object.create(b&&b.prototype,{constructor:{value:a,enumerable:!1,writable:!0,configurable:!0}}),b&&(Object.setPrototypeOf?Object.setPrototypeOf(a,b):a.__proto__=b)}function W(a,b){return ya(a)?new Ma(Ja,a,b).promise:Ja.reject(new TypeError("Promise.allSettled must be called with an array"),b)}function X(a,b){return Ja.race(a,b)}function Y(a,b){if(!a)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!b||"object"!=typeof b&&"function"!=typeof b?a:b}function Z(a,b){if("function"!=typeof b&&null!==b)throw new TypeError("Super expression must either be null or a function, not "+typeof b);a.prototype=Object.create(b&&b.prototype,{constructor:{value:a,enumerable:!1,writable:!0,configurable:!0}}),b&&(Object.setPrototypeOf?Object.setPrototypeOf(a,b):a.__proto__=b)}function $(a,b){return g(a)?new Oa(Ja,a,b).promise:Ja.reject(new TypeError("Promise.hash must be called with an object"),b)}function _(a,b){if(!a)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!b||"object"!=typeof b&&"function"!=typeof b?a:b}function aa(a,b){if("function"!=typeof b&&null!==b)throw new TypeError("Super expression must either be null or a function, not "+typeof b);a.prototype=Object.create(b&&b.prototype,{constructor:{value:a,enumerable:!1,writable:!0,configurable:!0}}),b&&(Object.setPrototypeOf?Object.setPrototypeOf(a,b):a.__proto__=b)}function ba(a,b){return g(a)?new Pa(Ja,a,!1,b).promise:Ja.reject(new TypeError("RSVP.hashSettled must be called with an object"),b)}function ca(a){throw 
setTimeout(function(){throw a}),a}function da(a){var b={resolve:void 0,reject:void 0};return b.promise=new Ja(function(a,c){b.resolve=a,b.reject=c},a),b}function ea(a,b,c){return ya(a)?f(b)?Ja.all(a,c).then(function(a){for(var d=a.length,e=new Array(d),f=0;f<d;f++)e[f]=b(a[f]);return Ja.all(e,c)}):Ja.reject(new TypeError("RSVP.map expects a function as a second argument"),c):Ja.reject(new TypeError("RSVP.map must be called with an array"),c)}function fa(a,b){return Ja.resolve(a,b)}function ga(a,b){return Ja.reject(a,b)}function ha(a,b){return Ja.all(a,b)}function ia(a,b){return Ja.resolve(a,b).then(function(a){return ha(a,b)})}function ja(a,b,c){return ya(a)||g(a)&&void 0!==a.then?f(b)?(ya(a)?ha(a,c):ia(a,c)).then(function(a){for(var d=a.length,e=new Array(d),f=0;f<d;f++)e[f]=b(a[f]);return ha(e,c).then(function(b){for(var c=new Array(d),e=0,f=0;f<d;f++)b[f]&&(c[e]=a[f],e++);return c.length=e,c})}):Ja.reject(new TypeError("RSVP.filter expects function as a second argument"),c):Ja.reject(new TypeError("RSVP.filter must be called with an array or promise"),c)}function ka(a,b){Xa[Qa]=a,Xa[Qa+1]=b,2===(Qa+=2)&&Ya()}function la(){var a=process.nextTick,b=process.versions.node.match(/^(?:(\d+)\.)?(?:(\d+)\.)?(\*|\d+)$/);return Array.isArray(b)&&"0"===b[1]&&"10"===b[2]&&(a=setImmediate),function(){return a(qa)}}function ma(){return void 0!==Ra?function(){Ra(qa)}:pa()}function na(){var a=0,b=new Ua(qa),c=document.createTextNode("");return b.observe(c,{characterData:!0}),function(){return c.data=a=++a%2}}function oa(){var a=new MessageChannel;return a.port1.onmessage=qa,function(){return a.port2.postMessage(0)}}function pa(){return function(){return setTimeout(qa,1)}}function qa(){for(var a=0;a<Qa;a+=2){(0,Xa[a])(Xa[a+1]),Xa[a]=void 0,Xa[a+1]=void 0}Qa=0}function ra(){try{var a=require,b=a("vertx");return Ra=b.runOnLoop||b.runOnContext,ma()}catch(a){return pa()}}function sa(a,b,c){return b in a?Object.defineProperty(a,b,{value:c,enumerable:!0,configurable:!0,writable:!0}):a[b]=c,a}function ta(){wa.on.apply(wa,arguments)}function ua(){wa.off.apply(wa,arguments)}var va={mixin:function(a){return a.on=this.on,a.off=this.off,a.trigger=this.trigger,a._promiseCallbacks=void 0,a},on:function(a,d){if("function"!=typeof d)throw new TypeError("Callback must be a function");var e=c(this),f=void 0;f=e[a],f||(f=e[a]=[]),-1===b(f,d)&&f.push(d)},off:function(a,d){var e=c(this),f=void 0,g=void 0;if(!d)return void(e[a]=[]);f=e[a],-1!==(g=b(f,d))&&f.splice(g,1)},trigger:function(a,b,d){var e=c(this),f=void 0;if(f=e[a])for(var g=0;g<f.length;g++)(0,f[g])(b,d)}},wa={instrument:!1};va.mixin(wa);var xa=void 0;xa=Array.isArray?Array.isArray:function(a){return"[object Array]"===Object.prototype.toString.call(a)};var ya=xa,za=Date.now||function(){return(new Date).getTime()},Aa=[],Ba=void 0,Ca=1,Da=2,Ea=new y,Fa=new y,Ga=function(){function a(a,b,c,d){this._instanceConstructor=a,this.promise=new a(m,d),this._abortOnReject=c,this._init.apply(this,arguments)}return a.prototype._init=function(a,b){var c=b.length||0;this.length=c,this._remaining=c,this._result=new Array(c),this._enumerate(b),0===this._remaining&&u(this.promise,this._result)},a.prototype._enumerate=function(a){for(var b=this.length,c=this.promise,d=0;c._state===Ba&&d<b;d++)this._eachEntry(a[d],d)},a.prototype._settleMaybeThenable=function(a,b){var c=this._instanceConstructor,d=c.resolve;if(d===k){var e=n(a);if(e===C&&a._state!==Ba)a._onError=null,this._settledAt(a._state,b,a._result);else if("function"!=typeof 
e)this._remaining--,this._result[b]=this._makeResult(Ca,b,a);else if(c===Ja){var f=new c(m);r(f,a,e),this._willSettleAt(f,b)}else this._willSettleAt(new c(function(b){return b(a)}),b)}else this._willSettleAt(d(a),b)},a.prototype._eachEntry=function(a,b){h(a)?this._settleMaybeThenable(a,b):(this._remaining--,this._result[b]=this._makeResult(Ca,b,a))},a.prototype._settledAt=function(a,b,c){var d=this.promise;d._state===Ba&&(this._abortOnReject&&a===Da?v(d,c):(this._remaining--,this._result[b]=this._makeResult(a,b,c),0===this._remaining&&u(d,this._result)))},a.prototype._makeResult=function(a,b,c){return c},a.prototype._willSettleAt=function(a,b){var c=this;w(a,void 0,function(a){return c._settledAt(Ca,b,a)},function(a){return c._settledAt(Da,b,a)})},a}(),Ha="rsvp_"+za()+"-",Ia=0,Ja=function(){function a(b,c){this._id=Ia++,this._label=c,this._state=void 0,this._result=void 0,this._subscribers=[],wa.instrument&&j("created",this),m!==b&&("function"!=typeof b&&H(),this instanceof a?B(this,b):I())}return a.prototype._onError=function(a){var b=this;wa.after(function(){b._onError&&wa.trigger("error",a,b._label)})},a.prototype.catch=function(a,b){return this.then(void 0,a,b)},a.prototype.finally=function(a,b){var c=this,d=c.constructor;return c.then(function(b){return d.resolve(a()).then(function(){return b})},function(b){return d.resolve(a()).then(function(){throw b})},b)},a}();Ja.cast=k,Ja.all=E,Ja.race=F,Ja.resolve=k,Ja.reject=G,Ja.prototype._guidKey=Ha,Ja.prototype.then=C;var Ka=new J,La=new J,Ma=function(a){function b(b,c,d){return U(this,a.call(this,b,c,!1,d))}return V(b,a),b}(Ga);Ma.prototype._makeResult=D;var Na=Object.prototype.hasOwnProperty,Oa=function(a){function b(b,c){var d=!(arguments.length>2&&void 0!==arguments[2])||arguments[2],e=arguments[3];return Y(this,a.call(this,b,c,d,e))}return Z(b,a),b.prototype._init=function(a,b){this._result={},this._enumerate(b),0===this._remaining&&u(this.promise,this._result)},b.prototype._enumerate=function(a){var b=this.promise,c=[];for(var d in a)Na.call(a,d)&&c.push({position:d,entry:a[d]});var e=c.length;this._remaining=e;for(var f=void 0,g=0;b._state===Ba&&g<e;g++)f=c[g],this._eachEntry(f.entry,f.position)},b}(Ga),Pa=function(a){function b(b,c,d){return _(this,a.call(this,b,c,!1,d))}return aa(b,a),b}(Oa);Pa.prototype._makeResult=D;var Qa=0,Ra=void 0,Sa="undefined"!=typeof window?window:void 0,Ta=Sa||{},Ua=Ta.MutationObserver||Ta.WebKitMutationObserver,Va="undefined"==typeof self&&"undefined"!=typeof process&&"[object process]"==={}.toString.call(process),Wa="undefined"!=typeof Uint8ClampedArray&&"undefined"!=typeof importScripts&&"undefined"!=typeof MessageChannel,Xa=new Array(1e3),Ya=void 0;Ya=Va?la():Ua?na():Wa?oa():void 0===Sa&&"function"==typeof require?ra():pa();if("object"==typeof self)self;else{if("object"!=typeof global)throw new Error("no global: `self` or `global` found");global}var Za;wa.async=ka,wa.after=function(a){return setTimeout(a,0)};var $a=fa,_a=function(a,b){return wa.async(a,b)};if("undefined"!=typeof window&&"object"==typeof window.__PROMISE_INSTRUMENTATION__){var ab=window.__PROMISE_INSTRUMENTATION__;d("instrument",!0);for(var bb in ab)ab.hasOwnProperty(bb)&&ta(bb,ab[bb])}var 
cb=(Za={asap:ka,cast:$a,Promise:Ja,EventTarget:va,all:T,allSettled:W,race:X,hash:$,hashSettled:ba,rethrow:ca,defer:da,denodeify:P,configure:d,on:ta,off:ua,resolve:fa,reject:ga,map:ea},sa(Za,"async",_a),sa(Za,"filter",ja),Za);a.default=cb,a.asap=ka,a.cast=$a,a.Promise=Ja,a.EventTarget=va,a.all=T,a.allSettled=W,a.race=X,a.hash=$,a.hashSettled=ba,a.rethrow=ca,a.defer=da,a.denodeify=P,a.configure=d,a.on=ta,a.off=ua,a.resolve=fa,a.reject=ga,a.map=ea,a.async=_a,a.filter=ja,Object.defineProperty(a,"__esModule",{value:!0})});var EPUBJS=EPUBJS||{};EPUBJS.core={};var ELEMENT_NODE=1,TEXT_NODE=3,COMMENT_NODE=8,DOCUMENT_NODE=9;EPUBJS.core.getEl=function(a){return document.getElementById(a)},EPUBJS.core.getEls=function(a){return document.getElementsByClassName(a)},EPUBJS.core.request=function(a,b,c){var d,e=window.URL,f=e?"blob":"arraybuffer",g=new RSVP.defer,h=new XMLHttpRequest,i=XMLHttpRequest.prototype,j=function(){var a;this.readyState==this.DONE&&(200!==this.status&&0!==this.status||!this.response?g.reject({message:this.response,stack:(new Error).stack}):(a="xml"==b?this.responseXML?this.responseXML:(new DOMParser).parseFromString(this.response,"application/xml"):"xhtml"==b?this.responseXML?this.responseXML:(new DOMParser).parseFromString(this.response,"application/xhtml+xml"):"html"==b?this.responseXML?this.responseXML:(new DOMParser).parseFromString(this.response,"text/html"):"json"==b?JSON.parse(this.response):"blob"==b?e?this.response:new Blob([this.response]):this.response,g.resolve(a)))};return"overrideMimeType"in i||Object.defineProperty(i,"overrideMimeType",{value:function(a){}}),h.onreadystatechange=j,h.open("GET",a,!0),c&&(h.withCredentials=!0),b||(d=EPUBJS.core.uri(a),b=d.extension,b={htm:"html"}[b]||b),"blob"==b&&(h.responseType=f),"json"==b&&h.setRequestHeader("Accept","application/json"),"xml"==b&&(h.responseType="document",h.overrideMimeType("text/xml")),"xhtml"==b&&(h.responseType="document"),"html"==b&&(h.responseType="document"),"binary"==b&&(h.responseType="arraybuffer"),h.send(),g.promise},EPUBJS.core.toArray=function(a){var b=[];for(var c in a){var d;a.hasOwnProperty(c)&&(d=a[c],d.ident=c,b.push(d))}return b},EPUBJS.core.uri=function(a){var b,c,d,e={protocol:"",host:"",path:"",origin:"",directory:"",base:"",filename:"",extension:"",fragment:"",href:a},f=a.indexOf("blob:"),g=a.indexOf("://"),h=a.indexOf("?"),i=a.indexOf("#");return 0===f?(e.protocol="blob",e.base=a.indexOf(0,i),e):(-1!=i&&(e.fragment=a.slice(i+1),a=a.slice(0,i)),-1!=h&&(e.search=a.slice(h+1),a=a.slice(0,h),href=e.href),-1!=g?(e.protocol=a.slice(0,g),b=a.slice(g+3),d=b.indexOf("/"),-1===d?(e.host=e.path,e.path=""):(e.host=b.slice(0,d),e.path=b.slice(d)),e.origin=e.protocol+"://"+e.host,e.directory=EPUBJS.core.folder(e.path),e.base=e.origin+e.directory):(e.path=a,e.directory=EPUBJS.core.folder(a),e.base=e.directory),e.filename=a.replace(e.base,""),c=e.filename.lastIndexOf("."),-1!=c&&(e.extension=e.filename.slice(c+1)),e)},EPUBJS.core.folder=function(a){var b=a.lastIndexOf("/");if(-1==b);return a.slice(0,b+1)},EPUBJS.core.dataURLToBlob=function(a){var b,c,d,e,f,g=";base64,";if(-1==a.indexOf(g))return b=a.split(","),c=b[0].split(":")[1],d=b[1],new Blob([d],{type:c});b=a.split(g),c=b[0].split(":")[1],d=window.atob(b[1]),e=d.length,f=new Uint8Array(e);for(var h=0;h<e;++h)f[h]=d.charCodeAt(h);return new Blob([f],{type:c})},EPUBJS.core.addScript=function(a,b,c){var 
d,e;e=!1,d=document.createElement("script"),d.type="text/javascript",d.async=!1,d.src=a,d.onload=d.onreadystatechange=function(){e||this.readyState&&"complete"!=this.readyState||(e=!0,b&&b())},c=c||document.body,c.appendChild(d)},EPUBJS.core.addScripts=function(a,b,c){var d=a.length,e=0,f=function(){e++,d==e?b&&b():EPUBJS.core.addScript(a[e],f,c)};EPUBJS.core.addScript(a[e],f,c)},EPUBJS.core.addCss=function(a,b,c){var d,e;e=!1,d=document.createElement("link"),d.type="text/css",d.rel="stylesheet",d.href=a,d.onload=d.onreadystatechange=function(){e||this.readyState&&"complete"!=this.readyState||(e=!0,b&&b())},c=c||document.body,c.appendChild(d)},EPUBJS.core.prefixed=function(a){var b=["Webkit","Moz","O","ms"],c=a[0].toUpperCase()+a.slice(1),d=b.length;if(void 0!==document.documentElement.style[a])return a;for(var e=0;e<d;e++)if(void 0!==document.documentElement.style[b[e]+c])return b[e]+c;return a},EPUBJS.core.resolveUrl=function(a,b){var c,d,e=[],f=EPUBJS.core.uri(b),g=a.split("/");return f.host?b:(g.pop(),d=b.split("/"),d.forEach(function(a){".."===a?g.pop():e.push(a)}),c=g.concat(e),c.join("/"))},EPUBJS.core.uuid=function(){var a=(new Date).getTime();return"xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g,function(b){var c=(a+16*Math.random())%16|0;return a=Math.floor(a/16),("x"==b?c:7&c|8).toString(16)})},EPUBJS.core.insert=function(a,b,c){var d=EPUBJS.core.locationOf(a,b,c);return b.splice(d,0,a),d},EPUBJS.core.locationOf=function(a,b,c,d,e){var f,g=d||0,h=e||b.length,i=parseInt(g+(h-g)/2);return c||(c=function(a,b){return a>b?1:a<b?-1:(a=b)?0:void 0}),h-g<=0?i:(f=c(b[i],a),h-g==1?f>0?i:i+1:0===f?i:-1===f?EPUBJS.core.locationOf(a,b,c,i,h):EPUBJS.core.locationOf(a,b,c,g,i))},EPUBJS.core.indexOfSorted=function(a,b,c,d,e){var f,g=d||0,h=e||b.length,i=parseInt(g+(h-g)/2);return c||(c=function(a,b){return a>b?1:a<b?-1:(a=b)?0:void 0}),h-g<=0?-1:(f=c(b[i],a),h-g==1?0===f?i:-1:0===f?i:-1===f?EPUBJS.core.indexOfSorted(a,b,c,i,h):EPUBJS.core.indexOfSorted(a,b,c,g,i))},EPUBJS.core.queue=function(a){var b=[],c=a,d=function(a,c,d){return b.push({funcName:a,args:c,context:d}),b},e=function(){var a;b.length&&(a=b.shift(),c[a.funcName].apply(a.context||c,a.args))};return{enqueue:d,dequeue:e,flush:function(){for(;b.length;)e()},clear:function(){b=[]},length:function(){return b.length}}},EPUBJS.core.getElementXPath=function(a){return a&&a.id?'//*[@id="'+a.id+'"]':EPUBJS.core.getElementTreeXPath(a)},EPUBJS.core.getElementTreeXPath=function(a){var b,c,d,e,f=[],g="http://www.w3.org/1999/xhtml"===a.ownerDocument.documentElement.getAttribute("xmlns");for(a.nodeType===Node.TEXT_NODE&&(b=EPUBJS.core.indexOfTextNode(a)+1,f.push("text()["+b+"]"),a=a.parentNode);a&&1==a.nodeType;a=a.parentNode){b=0;for(var h=a.previousSibling;h;h=h.previousSibling)h.nodeType!=Node.DOCUMENT_TYPE_NODE&&h.nodeName==a.nodeName&&++b;c=a.nodeName.toLowerCase(),d=g?"xhtml:"+c:c,e=b?"["+(b+1)+"]":"",f.splice(0,0,d+e)}return f.length?"./"+f.join("/"):null},EPUBJS.core.nsResolver=function(a){return{xhtml:"http://www.w3.org/1999/xhtml",epub:"http://www.idpf.org/2007/ops"}[a]||null},EPUBJS.core.cleanStringForXpath=function(a){var b=a.match(/[^'"]+|['"]/g);return b=b.map(function(a){return"'"===a?'"\'"':'"'===a?"'\"'":"'"+a+"'"}),"concat('',"+b.join(",")+")"},EPUBJS.core.indexOfTextNode=function(a){for(var b,c=a.parentNode,d=c.childNodes,e=-1,f=0;f<d.length&&(b=d[f],b.nodeType===Node.TEXT_NODE&&e++,b!=a);f++);return e},EPUBJS.core.defaults=function(a){for(var b=1,c=arguments.length;b<c;b++){var d=arguments[b];for(var e in d)void 
0===a[e]&&(a[e]=d[e])}return a},EPUBJS.core.extend=function(a){return[].slice.call(arguments,1).forEach(function(b){b&&Object.getOwnPropertyNames(b).forEach(function(c){Object.defineProperty(a,c,Object.getOwnPropertyDescriptor(b,c))})}),a},EPUBJS.core.clone=function(a){return EPUBJS.core.isArray(a)?a.slice():EPUBJS.core.extend({},a)},EPUBJS.core.isElement=function(a){return!(!a||1!=a.nodeType)},EPUBJS.core.isNumber=function(a){return!isNaN(parseFloat(a))&&isFinite(a)},EPUBJS.core.isString=function(a){return"string"==typeof a||a instanceof String},EPUBJS.core.isArray=Array.isArray||function(a){return"[object Array]"===Object.prototype.toString.call(a)},EPUBJS.core.values=function(a){var b,c,d,e=-1;if(!a)return[];for(b=Object.keys(a),c=b.length,d=Array(c);++e<c;)d[e]=a[b[e]];return d},EPUBJS.core.indexOfNode=function(a,b){for(var c,d=a.parentNode,e=d.childNodes,f=-1,g=0;g<e.length&&(c=e[g],c.nodeType===b&&f++,c!=a);g++);return f},EPUBJS.core.indexOfTextNode=function(a){return EPUBJS.core.indexOfNode(a,TEXT_NODE)},EPUBJS.core.indexOfElementNode=function(a){return EPUBJS.core.indexOfNode(a,ELEMENT_NODE)};var EPUBJS=EPUBJS||{};EPUBJS.reader={},EPUBJS.reader.plugins={},function(a,b){var c=(a.ePubReader,a.ePubReader=function(a,b){return new EPUBJS.Reader(a,b)});"function"==typeof define&&define.amd?define(function(){return Reader}):"undefined"!=typeof module&&module.exports&&(module.exports=c)}(window,jQuery),EPUBJS.Reader=function(a,b){var c,d,e,f=this,g=$("#viewer"),h=window.location.search;this.settings=EPUBJS.core.defaults(b||{},{bookPath:a,restore:!1,reload:!1,bookmarks:void 0,annotations:void 0,contained:void 0,bookKey:void 0,styles:void 0,sidebarReflow:!1,generatePagination:!1,history:!0}),h&&(e=h.slice(1).split("&"),e.forEach(function(a){var b=a.split("="),c=b[0],d=b[1]||"";f.settings[c]=decodeURIComponent(d)})),this.setBookKey(this.settings.bookPath),this.settings.restore&&this.isSaved()&&this.applySavedSettings(),this.settings.styles=this.settings.styles||{fontSize:"100%"},this.book=c=new ePub(this.settings.bookPath,this.settings),this.offline=!1,this.sidebarOpen=!1,this.settings.bookmarks||(this.settings.bookmarks=[]),this.settings.annotations||(this.settings.annotations=[]),this.settings.generatePagination&&c.generatePagination(g.width(),g.height()),this.rendition=c.renderTo("viewer",{ignoreClass:"annotator-hl",width:"100%",height:"100%"}),this.settings.previousLocationCfi?this.displayed=this.rendition.display(this.settings.previousLocationCfi):this.displayed=this.rendition.display(),c.ready.then(function(){f.ReaderController=EPUBJS.reader.ReaderController.call(f,c),f.SettingsController=EPUBJS.reader.SettingsController.call(f,c),f.ControlsController=EPUBJS.reader.ControlsController.call(f,c),f.SidebarController=EPUBJS.reader.SidebarController.call(f,c),f.BookmarksController=EPUBJS.reader.BookmarksController.call(f,c),f.NotesController=EPUBJS.reader.NotesController.call(f,c),window.addEventListener("hashchange",this.hashChanged.bind(this),!1),document.addEventListener("keydown",this.adjustFontSize.bind(this),!1),this.rendition.on("keydown",this.adjustFontSize.bind(this)),this.rendition.on("keydown",f.ReaderController.arrowKeys.bind(this)),this.rendition.on("selected",this.selectedRange.bind(this))}.bind(this)).then(function(){f.ReaderController.hideLoader()}.bind(this));for(d in EPUBJS.reader.plugins)EPUBJS.reader.plugins.hasOwnProperty(d)&&(f[d]=EPUBJS.reader.plugins[d].call(f,c));return 
c.loaded.metadata.then(function(a){f.MetaController=EPUBJS.reader.MetaController.call(f,a)}),c.loaded.navigation.then(function(a){f.TocController=EPUBJS.reader.TocController.call(f,a)}),window.addEventListener("beforeunload",this.unload.bind(this),!1),this},EPUBJS.Reader.prototype.adjustFontSize=function(a){var b,c=2,d=a.ctrlKey||a.metaKey;this.settings.styles&&(this.settings.styles.fontSize||(this.settings.styles.fontSize="100%"),b=parseInt(this.settings.styles.fontSize.slice(0,-1)),d&&187==a.keyCode&&(a.preventDefault(),this.book.setStyle("fontSize",b+c+"%")),d&&189==a.keyCode&&(a.preventDefault(),this.book.setStyle("fontSize",b-c+"%")),d&&48==a.keyCode&&(a.preventDefault(),this.book.setStyle("fontSize","100%")))},EPUBJS.Reader.prototype.addBookmark=function(a){this.isBookmarked(a)>-1||(this.settings.bookmarks.push(a),this.trigger("reader:bookmarked",a))},EPUBJS.Reader.prototype.removeBookmark=function(a){var b=this.isBookmarked(a);-1!==b&&(this.settings.bookmarks.splice(b,1),this.trigger("reader:unbookmarked",b))},EPUBJS.Reader.prototype.isBookmarked=function(a){return this.settings.bookmarks.indexOf(a)},EPUBJS.Reader.prototype.clearBookmarks=function(){this.settings.bookmarks=[]},EPUBJS.Reader.prototype.addNote=function(a){this.settings.annotations.push(a)},EPUBJS.Reader.prototype.removeNote=function(a){var b=this.settings.annotations.indexOf(a);-1!==b&&delete this.settings.annotations[b]},EPUBJS.Reader.prototype.clearNotes=function(){this.settings.annotations=[]},EPUBJS.Reader.prototype.setBookKey=function(a){return this.settings.bookKey||(this.settings.bookKey="epubjsreader:"+EPUBJS.VERSION+":"+window.location.host+":"+a),this.settings.bookKey},EPUBJS.Reader.prototype.isSaved=function(a){return!!localStorage&&null!==localStorage.getItem(this.settings.bookKey)},EPUBJS.Reader.prototype.removeSavedSettings=function(){if(!localStorage)return!1;localStorage.removeItem(this.settings.bookKey)},EPUBJS.Reader.prototype.applySavedSettings=function(){var a;if(!localStorage)return!1;try{a=JSON.parse(localStorage.getItem(this.settings.bookKey))}catch(a){return!1}return!!a&&(a.styles&&(this.settings.styles=EPUBJS.core.defaults(this.settings.styles||{},a.styles)),this.settings=EPUBJS.core.defaults(this.settings,a),!0)},EPUBJS.Reader.prototype.saveSettings=function(){if(this.book&&(this.settings.previousLocationCfi=this.rendition.currentLocation().start.cfi),!localStorage)return!1;localStorage.setItem(this.settings.bookKey,JSON.stringify(this.settings))},EPUBJS.Reader.prototype.unload=function(){this.settings.restore&&localStorage&&this.saveSettings()},EPUBJS.Reader.prototype.hashChanged=function(){var a=window.location.hash.slice(1);this.rendition.display(a)},EPUBJS.Reader.prototype.selectedRange=function(a){var b="#"+a;this.settings.history&&window.location.hash!=b&&(history.pushState({},"",b),this.currentLocationCfi=a)},RSVP.EventTarget.mixin(EPUBJS.Reader.prototype),EPUBJS.reader.BookmarksController=function(){var a=this.book,b=this.rendition,c=$("#bookmarksView"),d=c.find("#bookmarks"),e=document.createDocumentFragment(),f=function(){c.show()},g=function(){c.hide()},h=0,i=function(c){var d=document.createElement("li"),e=document.createElement("a");d.id="bookmark-"+h,d.classList.add("list_item");var f,g=a.spine.get(c);return g.index in a.navigation.toc?(f=a.navigation.toc[g.index],e.textContent=f.label):e.textContent=c,e.href=c,e.classList.add("bookmark_link"),e.addEventListener("click",function(a){var 
c=this.getAttribute("href");b.display(c),a.preventDefault()},!1),d.appendChild(e),h++,d};return this.settings.bookmarks.forEach(function(a){var b=i(a);e.appendChild(b)}),d.append(e),this.on("reader:bookmarked",function(a){var b=i(a);d.append(b)}),this.on("reader:unbookmarked",function(a){$("#bookmark-"+a).remove()}),{show:f,hide:g}},EPUBJS.reader.ControlsController=function(a){var b=this,c=this.rendition,d=($("#store"),$("#fullscreen")),e=($("#fullscreenicon"),$("#cancelfullscreenicon"),$("#slider")),f=($("#main"),$("#sidebar"),$("#setting")),g=$("#bookmark");return e.on("click",function(){b.sidebarOpen?(b.SidebarController.hide(),e.addClass("icon-menu"),e.removeClass("icon-right")):(b.SidebarController.show(),e.addClass("icon-right"),e.removeClass("icon-menu"))}),"undefined"!=typeof screenfull&&(d.on("click",function(){screenfull.toggle($("#container")[0])}),screenfull.raw&&document.addEventListener(screenfull.raw.fullscreenchange,function(){fullscreen=screenfull.isFullscreen,fullscreen?d.addClass("icon-resize-small").removeClass("icon-resize-full"):d.addClass("icon-resize-full").removeClass("icon-resize-small")})),f.on("click",function(){b.SettingsController.show()}),g.on("click",function(){var a=b.rendition.currentLocation().start.cfi;-1===b.isBookmarked(a)?(b.addBookmark(a),g.addClass("icon-bookmark").removeClass("icon-bookmark-empty")):(b.removeBookmark(a),g.removeClass("icon-bookmark").addClass("icon-bookmark-empty"))}),c.on("relocated",function(a){var c=a.start.cfi,d="#"+c;-1===b.isBookmarked(c)?g.removeClass("icon-bookmark").addClass("icon-bookmark-empty"):g.addClass("icon-bookmark").removeClass("icon-bookmark-empty"),b.currentLocationCfi=c,b.settings.history&&window.location.hash!=d&&history.pushState({},"",d)}),{}},EPUBJS.reader.MetaController=function(a){var b=a.title,c=a.creator,d=$("#book-title"),e=$("#chapter-title"),f=$("#title-seperator");document.title=b+" – "+c,d.html(b),e.html(c),f.show()},EPUBJS.reader.NotesController=function(){var a=this.book,b=this.rendition,c=this,d=$("#notesView"),e=$("#notes"),f=$("#note-text"),g=$("#note-anchor"),h=c.settings.annotations,i=a.renderer,j=[],k=new ePub.CFI,l=function(){d.show()},m=function(){d.hide()},n=function(d){var e,h,i,j,l,m=a.renderer.doc;if(m.caretPositionFromPoint?(e=m.caretPositionFromPoint(d.clientX,d.clientY),h=e.offsetNode,i=e.offset):m.caretRangeFromPoint&&(e=m.caretRangeFromPoint(d.clientX,d.clientY),h=e.startContainer,i=e.startOffset),3!==h.nodeType)for(var q=0;q<h.childNodes.length;q++)if(3==h.childNodes[q].nodeType){h=h.childNodes[q];break}i=h.textContent.indexOf(".",i),-1===i?i=h.length:i+=1,j=k.generateCfiFromTextNode(h,i,a.renderer.currentChapter.cfiBase),l={annotatedAt:new Date,anchor:j,body:f.val()},c.addNote(l),o(l),p(l),f.val(""),g.text("Attach"),f.prop("disabled",!1),b.off("click",n)},o=function(a){var c=document.createElement("li"),d=document.createElement("a");c.innerHTML=a.body,d.innerHTML=" context »",d.href="#"+a.anchor,d.onclick=function(){return b.display(a.anchor),!1},c.appendChild(d),e.append(c)},p=function(b){var c=a.renderer.doc,d=document.createElement("span"),e=document.createElement("a");d.classList.add("footnotesuperscript","reader_generated"),d.style.verticalAlign="super",d.style.fontSize=".75em",d.style.lineHeight="1em",e.style.padding="2px",e.style.backgroundColor="#fffa96",e.style.borderRadius="5px",e.style.cursor="pointer",d.id="note-"+EPUBJS.core.uuid(),e.innerHTML=h.indexOf(b)+1+"[Reader]",d.appendChild(e),k.addMarker(b.anchor,c,d),q(d,b.body)},q=function(a,d){var 
e=a.id,f=function(){var c,f,l,m,n=i.height,o=i.width,p=225;j[e]||(j[e]=document.createElement("div"),j[e].setAttribute("class","popup"),pop_content=document.createElement("div"),j[e].appendChild(pop_content),pop_content.innerHTML=d,pop_content.setAttribute("class","pop_content"),i.render.document.body.appendChild(j[e]),j[e].addEventListener("mouseover",g,!1),j[e].addEventListener("mouseout",h,!1),b.on("locationChanged",k,this),b.on("locationChanged",h,this)),c=j[e],f=a.getBoundingClientRect(),l=f.left,m=f.top,c.classList.add("show"),popRect=c.getBoundingClientRect(),c.style.left=l-popRect.width/2+"px",c.style.top=m+"px",p>n/2.5&&(p=n/2.5,pop_content.style.maxHeight=p+"px"),popRect.height+m>=n-25?(c.style.top=m-popRect.height+"px",c.classList.add("above")):c.classList.remove("above"),l-popRect.width<=0?(c.style.left=l+"px",c.classList.add("left")):c.classList.remove("left"),l+popRect.width/2>=o?(c.style.left=l-300+"px",popRect=c.getBoundingClientRect(),c.style.left=l-popRect.width+"px",popRect.height+m>=n-25?(c.style.top=m-popRect.height+"px",c.classList.add("above")):c.classList.remove("above"),c.classList.add("right")):c.classList.remove("right")},g=function(){j[e].classList.add("on")},h=function(){j[e].classList.remove("on")},k=function(){setTimeout(function(){j[e].classList.remove("show")},100)},m=function(){c.ReaderController.slideOut(),l()};a.addEventListener("mouseover",f,!1),a.addEventListener("mouseout",k,!1),a.addEventListener("click",m,!1)};return g.on("click",function(a){g.text("Cancel"),f.prop("disabled","true"),b.on("click",n)}),h.forEach(function(a){o(a)}),{show:l,hide:m}},EPUBJS.reader.ReaderController=function(a){var b=$("#main"),c=$("#divider"),d=$("#loader"),e=$("#next"),f=$("#prev"),g=this,a=this.book,h=this.rendition,i=function(){h.currentLocation().start.cfi;g.settings.sidebarReflow?(b.removeClass("single"),b.one("transitionend",function(){h.resize()})):b.removeClass("closed")},j=function(){var a=h.currentLocation();if(a){a.start.cfi;g.settings.sidebarReflow?(b.addClass("single"),b.one("transitionend",function(){h.resize()})):b.addClass("closed")}},k=function(){d.show(),n()},l=function(){d.hide()},m=function(){c.addClass("show")},n=function(){c.removeClass("show")},o=!1,p=function(b){37==b.keyCode&&("rtl"===a.package.metadata.direction?h.next():h.prev(),f.addClass("active"),o=!0,setTimeout(function(){o=!1,f.removeClass("active")},100),b.preventDefault()),39==b.keyCode&&("rtl"===a.package.metadata.direction?h.prev():h.next(),e.addClass("active"),o=!0,setTimeout(function(){o=!1,e.removeClass("active")},100),b.preventDefault())};return document.addEventListener("keydown",p,!1),e.on("click",function(b){"rtl"===a.package.metadata.direction?h.prev():h.next(),b.preventDefault()}),f.on("click",function(b){"rtl"===a.package.metadata.direction?h.next():h.prev(),b.preventDefault()}),h.on("layout",function(a){!0===a.spread?m():n()}),h.on("relocated",function(a){a.atStart&&f.addClass("disabled"),a.atEnd&&e.addClass("disabled")}),{slideOut:j,slideIn:i,showLoader:k,hideLoader:l,showDivider:m,hideDivider:n,arrowKeys:p}},EPUBJS.reader.SettingsController=function(){var a=(this.book,this),b=$("#settings-modal"),c=$(".overlay"),d=function(){b.addClass("md-show")},e=function(){b.removeClass("md-show")};return $("#sidebarReflow").on("click",function(){a.settings.sidebarReflow=!a.settings.sidebarReflow}),b.find(".closer").on("click",function(){e()}),c.on("click",function(){e()}),{show:d,hide:e}},EPUBJS.reader.SidebarController=function(a){var 
b=this,c=$("#sidebar"),d=$("#panels"),e="Toc",f=function(a){var c=a+"Controller";e!=a&&void 0!==b[c]&&(b[e+"Controller"].hide(),b[c].show(),e=a,d.find(".active").removeClass("active"),d.find("#show-"+a).addClass("active"))},g=function(){return e},h=function(){b.sidebarOpen=!0,b.ReaderController.slideOut(),c.addClass("open")},i=function(){b.sidebarOpen=!1,b.ReaderController.slideIn(),c.removeClass("open")};return d.find(".show_view").on("click",function(a){var b=$(this).data("view");f(b),a.preventDefault()}),{show:h,hide:i,getActivePanel:g,changePanelTo:f}},EPUBJS.reader.TocController=function(a){var b=(this.book,this.rendition),c=$("#tocView"),d=document.createDocumentFragment(),e=!1,f=function(a,b){var c=document.createElement("ul");return b||(b=1),a.forEach(function(a){var d=document.createElement("li"),e=document.createElement("a");toggle=document.createElement("a");var g;d.id="toc-"+a.id,d.classList.add("list_item"),e.textContent=a.label,e.href=a.href,e.classList.add("toc_link"),d.appendChild(e),a.subitems&&a.subitems.length>0&&(b++,g=f(a.subitems,b),toggle.classList.add("toc_toggle"),d.insertBefore(toggle,e),d.appendChild(g)),c.appendChild(d)}),c},g=function(){c.show()},h=function(){c.hide()},i=function(a){var b=a.id,d=c.find("#toc-"+b),f=c.find(".currentChapter");c.find(".openChapter");d.length&&(d!=f&&d.has(e).length>0&&f.removeClass("currentChapter"),d.addClass("currentChapter"),d.parents("li").addClass("openChapter"))};b.on("renderered",i);var j=f(a);return d.appendChild(j),c.append(d),c.find(".toc_link").on("click",function(a){var d=this.getAttribute("href");a.preventDefault(),b.display(d),c.find(".currentChapter").addClass("openChapter").removeClass("currentChapter"),$(this).parent("li").addClass("currentChapter")}),c.find(".toc_toggle").on("click",function(a){var b=$(this).parent("li"),c=b.hasClass("openChapter");a.preventDefault(),c?b.removeClass("openChapter"):b.addClass("openChapter")}),{show:g,hide:h}}; | zhiyao-huihuxi-jiuneng-zuomingxiang | /zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1.tar.gz/zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1/ZhiyaoHuihuxiJiunengZuomingxiang/js/reader.min.js | reader.min.js |
EPUBJS.Hooks.register("beforeChapterDisplay").endnotes=function(a,b){var c=b.contents.querySelectorAll("a[href]"),d=Array.prototype.slice.call(c),e=EPUBJS.core.folder(location.pathname),f=(EPUBJS.cssPath,{});EPUBJS.core.addCss(EPUBJS.cssPath+"popup.css",!1,b.render.document.head),d.forEach(function(a){function c(){var c,h,n=b.height,o=b.width,p=225;m||(c=j.cloneNode(!0),m=c.querySelector("p")),f[i]||(f[i]=document.createElement("div"),f[i].setAttribute("class","popup"),pop_content=document.createElement("div"),f[i].appendChild(pop_content),pop_content.appendChild(m),pop_content.setAttribute("class","pop_content"),b.render.document.body.appendChild(f[i]),f[i].addEventListener("mouseover",d,!1),f[i].addEventListener("mouseout",e,!1),b.on("renderer:pageChanged",g,this),b.on("renderer:pageChanged",e,this)),c=f[i],h=a.getBoundingClientRect(),k=h.left,l=h.top,c.classList.add("show"),popRect=c.getBoundingClientRect(),c.style.left=k-popRect.width/2+"px",c.style.top=l+"px",p>n/2.5&&(p=n/2.5,pop_content.style.maxHeight=p+"px"),popRect.height+l>=n-25?(c.style.top=l-popRect.height+"px",c.classList.add("above")):c.classList.remove("above"),k-popRect.width<=0?(c.style.left=k+"px",c.classList.add("left")):c.classList.remove("left"),k+popRect.width/2>=o?(c.style.left=k-300+"px",popRect=c.getBoundingClientRect(),c.style.left=k-popRect.width+"px",popRect.height+l>=n-25?(c.style.top=l-popRect.height+"px",c.classList.add("above")):c.classList.remove("above"),c.classList.add("right")):c.classList.remove("right")}function d(){f[i].classList.add("on")}function e(){f[i].classList.remove("on")}function g(){setTimeout(function(){f[i].classList.remove("show")},100)}var h,i,j,k,l,m;"noteref"==a.getAttribute("epub:type")&&(h=a.getAttribute("href"),i=h.replace("#",""),j=b.render.document.getElementById(i),a.addEventListener("mouseover",c,!1),a.addEventListener("mouseout",g,!1))}),a&&a()},EPUBJS.Hooks.register("beforeChapterDisplay").mathml=function(a,b){if(b.currentChapter.manifestProperties.indexOf("mathml")!==-1){b.render.iframe.contentWindow.mathmlCallback=a;var c=document.createElement("script");c.type="text/x-mathjax-config",c.innerHTML=' MathJax.Hub.Register.StartupHook("End",function () { window.mathmlCallback(); }); MathJax.Hub.Config({jax: ["input/TeX","input/MathML","output/SVG"],extensions: ["tex2jax.js","mml2jax.js","MathEvents.js"],TeX: {extensions: ["noErrors.js","noUndefined.js","autoload-all.js"]},MathMenu: {showRenderer: false},menuSettings: {zoom: "Click"},messageStyle: "none"}); ',b.doc.body.appendChild(c),EPUBJS.core.addScript("http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML",null,b.doc.head)}else a&&a()},EPUBJS.Hooks.register("beforeChapterDisplay").smartimages=function(a,b){var c=b.contents.querySelectorAll("img"),d=Array.prototype.slice.call(c),e=b.height;if("reflowable"!=b.layoutSettings.layout)return void a();d.forEach(function(a){var c=function(){var 
c,d=a.getBoundingClientRect(),f=d.height,g=d.top,h=a.getAttribute("data-height"),i=h||f,j=Number(getComputedStyle(a,"").fontSize.match(/(\d*(\.\d*)?)px/)[1]),k=j?j/2:0;e=b.contents.clientHeight,g<0&&(g=0),a.style.maxWidth="100%",i+g>=e?(g<e/2?(c=e-g-k,a.style.maxHeight=c+"px",a.style.width="auto"):(i>e&&(a.style.maxHeight=e+"px",a.style.width="auto",d=a.getBoundingClientRect(),i=d.height),a.style.display="block",a.style.WebkitColumnBreakBefore="always",a.style.breakBefore="column"),a.setAttribute("data-height",c)):(a.style.removeProperty("max-height"),a.style.removeProperty("margin-top"))},d=function(){b.off("renderer:resized",c),b.off("renderer:chapterUnload",this)};a.addEventListener("load",c,!1),b.on("renderer:resized",c),b.on("renderer:chapterUnload",d),c()}),a&&a()},EPUBJS.Hooks.register("beforeChapterDisplay").transculsions=function(a,b){var c=b.contents.querySelectorAll("[transclusion]");Array.prototype.slice.call(c).forEach(function(a){function c(){j=g,k=h,j>chapter.colWidth&&(d=chapter.colWidth/j,j=chapter.colWidth,k*=d),f.width=j,f.height=k}var d,e=a.getAttribute("ref"),f=document.createElement("iframe"),g=a.getAttribute("width"),h=a.getAttribute("height"),i=a.parentNode,j=g,k=h;c(),b.listenUntil("renderer:resized","renderer:chapterUnloaded",c),f.src=e,i.replaceChild(f,a)}),a&&a()}; | zhiyao-huihuxi-jiuneng-zuomingxiang | /zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1.tar.gz/zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1/ZhiyaoHuihuxiJiunengZuomingxiang/js/hooks.min.js | hooks.min.js |
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
typeof define === 'function' && define.amd ? define(['exports'], factory) :
(factory((global.RSVP = global.RSVP || {})));
}(this, (function (exports) { 'use strict';
function indexOf(callbacks, callback) {
for (var i = 0, l = callbacks.length; i < l; i++) {
if (callbacks[i] === callback) {
return i;
}
}
return -1;
}
function callbacksFor(object) {
var callbacks = object._promiseCallbacks;
if (!callbacks) {
callbacks = object._promiseCallbacks = {};
}
return callbacks;
}
/**
@class RSVP.EventTarget
*/
var EventTarget = {
/**
`RSVP.EventTarget.mixin` extends an object with EventTarget methods. For
example:
```javascript
let object = {};
RSVP.EventTarget.mixin(object);
object.on('finished', function(event) {
// handle event
});
object.trigger('finished', { detail: value });
```
`EventTarget.mixin` also works with prototypes:
```javascript
let Person = function() {};
RSVP.EventTarget.mixin(Person.prototype);
let yehuda = new Person();
let tom = new Person();
yehuda.on('poke', function(event) {
console.log('Yehuda says OW');
});
tom.on('poke', function(event) {
console.log('Tom says OW');
});
yehuda.trigger('poke');
tom.trigger('poke');
```
@method mixin
@for RSVP.EventTarget
@private
@param {Object} object object to extend with EventTarget methods
*/
mixin: function (object) {
object['on'] = this['on'];
object['off'] = this['off'];
object['trigger'] = this['trigger'];
object._promiseCallbacks = undefined;
return object;
},
/**
Registers a callback to be executed when `eventName` is triggered
```javascript
object.on('event', function(eventInfo){
// handle the event
});
object.trigger('event');
```
@method on
@for RSVP.EventTarget
@private
@param {String} eventName name of the event to listen for
@param {Function} callback function to be called when the event is triggered.
*/
on: function (eventName, callback) {
if (typeof callback !== 'function') {
throw new TypeError('Callback must be a function');
}
var allCallbacks = callbacksFor(this),
callbacks = void 0;
callbacks = allCallbacks[eventName];
if (!callbacks) {
callbacks = allCallbacks[eventName] = [];
}
if (indexOf(callbacks, callback) === -1) {
callbacks.push(callback);
}
},
/**
You can use `off` to stop firing a particular callback for an event:
```javascript
function doStuff() { // do stuff! }
object.on('stuff', doStuff);
object.trigger('stuff'); // doStuff will be called
// Unregister ONLY the doStuff callback
object.off('stuff', doStuff);
object.trigger('stuff'); // doStuff will NOT be called
```
If you don't pass a `callback` argument to `off`, ALL callbacks for the
event will be removed, so none of them run the next time the event fires. For example:
```javascript
let callback1 = function(){};
let callback2 = function(){};
object.on('stuff', callback1);
object.on('stuff', callback2);
object.trigger('stuff'); // callback1 and callback2 will be executed.
object.off('stuff');
object.trigger('stuff'); // callback1 and callback2 will not be executed!
```
@method off
@for RSVP.EventTarget
@private
@param {String} eventName event to stop listening to
@param {Function} callback optional argument. If given, only the function
given will be removed from the event's callback queue. If no `callback`
argument is given, all callbacks will be removed from the event's callback
queue.
*/
off: function (eventName, callback) {
var allCallbacks = callbacksFor(this),
callbacks = void 0,
index = void 0;
if (!callback) {
allCallbacks[eventName] = [];
return;
}
callbacks = allCallbacks[eventName];
index = indexOf(callbacks, callback);
if (index !== -1) {
callbacks.splice(index, 1);
}
},
/**
Use `trigger` to fire custom events. For example:
```javascript
object.on('foo', function(){
console.log('foo event happened!');
});
object.trigger('foo');
// 'foo event happened!' logged to the console
```
You can also pass a value as a second argument to `trigger` that will be
passed as an argument to all event listeners for the event:
```javascript
object.on('foo', function(value){
console.log(value.name);
});
object.trigger('foo', { name: 'bar' });
// 'bar' logged to the console
```
@method trigger
@for RSVP.EventTarget
@private
@param {String} eventName name of the event to be triggered
@param {*} options optional value to be passed to any event handlers for
the given `eventName`
*/
trigger: function (eventName, options, label) {
var allCallbacks = callbacksFor(this),
callbacks = void 0,
callback = void 0;
if (callbacks = allCallbacks[eventName]) {
// Don't cache the callbacks.length since it may grow
for (var i = 0; i < callbacks.length; i++) {
callback = callbacks[i];
callback(options, label);
}
}
}
};
var config = {
instrument: false
};
EventTarget['mixin'](config);
function configure(name, value) {
if (arguments.length === 2) {
config[name] = value;
} else {
return config[name];
}
}
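/**
  A quick sketch of `configure`'s getter/setter duality above (the full RSVP
  build re-exports this as `RSVP.configure`):
  ```javascript
  configure('instrument', true);  // two arguments: sets config.instrument
  configure('instrument');        // one argument: reads it back => true
  ```
*/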
function objectOrFunction(x) {
var type = typeof x;
return x !== null && (type === 'object' || type === 'function');
}
function isFunction(x) {
return typeof x === 'function';
}
function isObject(x) {
return x !== null && typeof x === 'object';
}
function isMaybeThenable(x) {
return x !== null && typeof x === 'object';
}
var _isArray = void 0;
if (Array.isArray) {
_isArray = Array.isArray;
} else {
_isArray = function (x) {
return Object.prototype.toString.call(x) === '[object Array]';
};
}
var isArray = _isArray;
// Date.now is not available in browsers < IE9
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/now#Compatibility
var now = Date.now || function () {
return new Date().getTime();
};
var queue = [];
function scheduleFlush() {
setTimeout(function () {
for (var i = 0; i < queue.length; i++) {
var entry = queue[i];
var payload = entry.payload;
payload.guid = payload.key + payload.id;
payload.childGuid = payload.key + payload.childId;
if (payload.error) {
payload.stack = payload.error.stack;
}
config['trigger'](entry.name, entry.payload);
}
queue.length = 0;
}, 50);
}
function instrument(eventName, promise, child) {
if (1 === queue.push({
name: eventName,
payload: {
key: promise._guidKey,
id: promise._id,
eventName: eventName,
detail: promise._result,
childId: child && child._id,
label: promise._label,
timeStamp: now(),
error: config["instrument-with-stack"] ? new Error(promise._label) : null
} })) {
scheduleFlush();
}
}
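/**
  Usage sketch (hedged): with instrumentation enabled, the queue above batches
  lifecycle events ('created', 'chained', 'fulfilled', 'rejected') and emits
  them through the EventTarget-mixed `config`. The names below assume the full
  RSVP build, which exposes the mixin as `RSVP.on`/`RSVP.configure`:
  ```javascript
  RSVP.configure('instrument', true);
  RSVP.on('created', function (event) {
    console.log(event.guid, event.eventName, event.timeStamp);
  });
  ```
*/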
/**
`RSVP.Promise.resolve` returns a promise that will become resolved with the
passed `value`. It is shorthand for the following:
```javascript
let promise = new RSVP.Promise(function(resolve, reject){
resolve(1);
});
promise.then(function(value){
// value === 1
});
```
Instead of writing the above, your code now simply becomes the following:
```javascript
let promise = RSVP.Promise.resolve(1);
promise.then(function(value){
// value === 1
});
```
@method resolve
@static
@param {*} object value that the returned promise will be resolved with
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise that will become fulfilled with the given
`value`
*/
function resolve$1(object, label) {
/*jshint validthis:true */
var Constructor = this;
if (object && typeof object === 'object' && object.constructor === Constructor) {
return object;
}
var promise = new Constructor(noop, label);
resolve(promise, object);
return promise;
}
function withOwnPromise() {
return new TypeError('A promises callback cannot return that same promise.');
}
function noop() {}
var PENDING = void 0;
var FULFILLED = 1;
var REJECTED = 2;
var GET_THEN_ERROR = new ErrorObject();
function getThen(promise) {
try {
return promise.then;
} catch (error) {
GET_THEN_ERROR.error = error;
return GET_THEN_ERROR;
}
}
function tryThen(then$$1, value, fulfillmentHandler, rejectionHandler) {
try {
then$$1.call(value, fulfillmentHandler, rejectionHandler);
} catch (e) {
return e;
}
}
function handleForeignThenable(promise, thenable, then$$1) {
config.async(function (promise) {
var sealed = false;
var error = tryThen(then$$1, thenable, function (value) {
if (sealed) {
return;
}
sealed = true;
if (thenable !== value) {
resolve(promise, value, undefined);
} else {
fulfill(promise, value);
}
}, function (reason) {
if (sealed) {
return;
}
sealed = true;
reject(promise, reason);
}, 'Settle: ' + (promise._label || ' unknown promise'));
if (!sealed && error) {
sealed = true;
reject(promise, error);
}
}, promise);
}
function handleOwnThenable(promise, thenable) {
if (thenable._state === FULFILLED) {
fulfill(promise, thenable._result);
} else if (thenable._state === REJECTED) {
thenable._onError = null;
reject(promise, thenable._result);
} else {
subscribe(thenable, undefined, function (value) {
if (thenable !== value) {
resolve(promise, value, undefined);
} else {
fulfill(promise, value);
}
}, function (reason) {
return reject(promise, reason);
});
}
}
function handleMaybeThenable(promise, maybeThenable, then$$1) {
var isOwnThenable = maybeThenable.constructor === promise.constructor && then$$1 === then && promise.constructor.resolve === resolve$1;
if (isOwnThenable) {
handleOwnThenable(promise, maybeThenable);
} else if (then$$1 === GET_THEN_ERROR) {
reject(promise, GET_THEN_ERROR.error);
GET_THEN_ERROR.error = null;
} else if (isFunction(then$$1)) {
handleForeignThenable(promise, maybeThenable, then$$1);
} else {
fulfill(promise, maybeThenable);
}
}
function resolve(promise, value) {
if (promise === value) {
fulfill(promise, value);
} else if (objectOrFunction(value)) {
handleMaybeThenable(promise, value, getThen(value));
} else {
fulfill(promise, value);
}
}
function publishRejection(promise) {
if (promise._onError) {
promise._onError(promise._result);
}
publish(promise);
}
function fulfill(promise, value) {
if (promise._state !== PENDING) {
return;
}
promise._result = value;
promise._state = FULFILLED;
if (promise._subscribers.length === 0) {
if (config.instrument) {
instrument('fulfilled', promise);
}
} else {
config.async(publish, promise);
}
}
function reject(promise, reason) {
if (promise._state !== PENDING) {
return;
}
promise._state = REJECTED;
promise._result = reason;
config.async(publishRejection, promise);
}
function subscribe(parent, child, onFulfillment, onRejection) {
var subscribers = parent._subscribers;
var length = subscribers.length;
parent._onError = null;
subscribers[length] = child;
subscribers[length + FULFILLED] = onFulfillment;
subscribers[length + REJECTED] = onRejection;
if (length === 0 && parent._state) {
config.async(publish, parent);
}
}
function publish(promise) {
var subscribers = promise._subscribers;
var settled = promise._state;
if (config.instrument) {
instrument(settled === FULFILLED ? 'fulfilled' : 'rejected', promise);
}
if (subscribers.length === 0) {
return;
}
var child = void 0,
callback = void 0,
result = promise._result;
for (var i = 0; i < subscribers.length; i += 3) {
child = subscribers[i];
callback = subscribers[i + settled];
if (child) {
invokeCallback(settled, child, callback, result);
} else {
callback(result);
}
}
promise._subscribers.length = 0;
}
function ErrorObject() {
this.error = null;
}
var TRY_CATCH_ERROR = new ErrorObject();
function tryCatch(callback, result) {
try {
return callback(result);
} catch (e) {
TRY_CATCH_ERROR.error = e;
return TRY_CATCH_ERROR;
}
}
function invokeCallback(state, promise, callback, result) {
var hasCallback = isFunction(callback);
var value = void 0,
error = void 0;
if (hasCallback) {
value = tryCatch(callback, result);
if (value === TRY_CATCH_ERROR) {
error = value.error;
value.error = null; // release
} else if (value === promise) {
reject(promise, withOwnPromise());
return;
}
} else {
value = result;
}
if (promise._state !== PENDING) {
// noop
} else if (hasCallback && error === undefined) {
resolve(promise, value);
} else if (error !== undefined) {
reject(promise, error);
} else if (state === FULFILLED) {
fulfill(promise, value);
} else if (state === REJECTED) {
reject(promise, value);
}
}
function initializePromise(promise, resolver) {
var resolved = false;
try {
resolver(function (value) {
if (resolved) {
return;
}
resolved = true;
resolve(promise, value);
}, function (reason) {
if (resolved) {
return;
}
resolved = true;
reject(promise, reason);
});
} catch (e) {
reject(promise, e);
}
}
function then(onFulfillment, onRejection, label) {
var parent = this;
var state = parent._state;
if (state === FULFILLED && !onFulfillment || state === REJECTED && !onRejection) {
config.instrument && instrument('chained', parent, parent);
return parent;
}
parent._onError = null;
var child = new parent.constructor(noop, label);
var result = parent._result;
config.instrument && instrument('chained', parent, child);
if (state === PENDING) {
subscribe(parent, child, onFulfillment, onRejection);
} else {
var callback = state === FULFILLED ? onFulfillment : onRejection;
config.async(function () {
return invokeCallback(state, child, callback, result);
});
}
return child;
}
var Enumerator = function () {
function Enumerator(Constructor, input, abortOnReject, label) {
this._instanceConstructor = Constructor;
this.promise = new Constructor(noop, label);
this._abortOnReject = abortOnReject;
this._init.apply(this, arguments);
}
Enumerator.prototype._init = function _init(Constructor, input) {
var len = input.length || 0;
this.length = len;
this._remaining = len;
this._result = new Array(len);
this._enumerate(input);
if (this._remaining === 0) {
fulfill(this.promise, this._result);
}
};
Enumerator.prototype._enumerate = function _enumerate(input) {
var length = this.length;
var promise = this.promise;
for (var i = 0; promise._state === PENDING && i < length; i++) {
this._eachEntry(input[i], i);
}
};
Enumerator.prototype._settleMaybeThenable = function _settleMaybeThenable(entry, i) {
var c = this._instanceConstructor;
var resolve$$1 = c.resolve;
if (resolve$$1 === resolve$1) {
var then$$1 = getThen(entry);
if (then$$1 === then && entry._state !== PENDING) {
entry._onError = null;
this._settledAt(entry._state, i, entry._result);
} else if (typeof then$$1 !== 'function') {
this._remaining--;
this._result[i] = this._makeResult(FULFILLED, i, entry);
} else if (c === Promise) {
var promise = new c(noop);
handleMaybeThenable(promise, entry, then$$1);
this._willSettleAt(promise, i);
} else {
this._willSettleAt(new c(function (resolve$$1) {
return resolve$$1(entry);
}), i);
}
} else {
this._willSettleAt(resolve$$1(entry), i);
}
};
Enumerator.prototype._eachEntry = function _eachEntry(entry, i) {
if (isMaybeThenable(entry)) {
this._settleMaybeThenable(entry, i);
} else {
this._remaining--;
this._result[i] = this._makeResult(FULFILLED, i, entry);
}
};
Enumerator.prototype._settledAt = function _settledAt(state, i, value) {
var promise = this.promise;
if (promise._state === PENDING) {
if (this._abortOnReject && state === REJECTED) {
reject(promise, value);
} else {
this._remaining--;
this._result[i] = this._makeResult(state, i, value);
if (this._remaining === 0) {
fulfill(promise, this._result);
}
}
}
};
Enumerator.prototype._makeResult = function _makeResult(state, i, value) {
return value;
};
Enumerator.prototype._willSettleAt = function _willSettleAt(promise, i) {
var enumerator = this;
subscribe(promise, undefined, function (value) {
return enumerator._settledAt(FULFILLED, i, value);
}, function (reason) {
return enumerator._settledAt(REJECTED, i, reason);
});
};
return Enumerator;
}();
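/**
  `Enumerator` is internal plumbing: it powers `all`, `allSettled`, `hash` and
  `hashSettled` by settling each entry and fulfilling `this.promise` once
  `_remaining` reaches zero. A hedged sketch of driving it directly (normally
  you would go through the public helpers instead):
  ```javascript
  new Enumerator(Promise, [1, Promise.resolve(2)], true /* abort on reject */)
    .promise.then(function (values) {
      // values === [1, 2]; non-thenable entries pass through unchanged
    });
  ```
*/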
function makeSettledResult(state, position, value) {
if (state === FULFILLED) {
return {
state: 'fulfilled',
value: value
};
} else {
return {
state: 'rejected',
reason: value
};
}
}
/**
`RSVP.Promise.all` accepts an array of promises, and returns a new promise which
is fulfilled with an array of fulfillment values for the passed promises, or
rejected with the reason of the first passed promise to be rejected. It casts all
elements of the passed iterable to promises as it runs this algorithm.
Example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.resolve(2);
let promise3 = RSVP.resolve(3);
let promises = [ promise1, promise2, promise3 ];
RSVP.Promise.all(promises).then(function(array){
// The array here would be [ 1, 2, 3 ];
});
```
If any of the `promises` given to `RSVP.all` are rejected, the first promise
that is rejected will be given as an argument to the returned promise's
rejection handler. For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.reject(new Error("2"));
let promise3 = RSVP.reject(new Error("3"));
let promises = [ promise1, promise2, promise3 ];
RSVP.Promise.all(promises).then(function(array){
// Code here never runs because there are rejected promises!
}, function(error) {
// error.message === "2"
});
```
@method all
@static
@param {Array} entries array of promises
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled when all `promises` have been
fulfilled, or rejected if any of them become rejected.
@static
*/
function all(entries, label) {
if (!isArray(entries)) {
return this.reject(new TypeError("Promise.all must be called with an array"), label);
}
return new Enumerator(this, entries, true /* abort on reject */, label).promise;
}
/**
`RSVP.Promise.race` returns a new promise which is settled in the same way as the
first passed promise to settle.
Example:
```javascript
let promise1 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
resolve('promise 1');
}, 200);
});
let promise2 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
resolve('promise 2');
}, 100);
});
RSVP.Promise.race([promise1, promise2]).then(function(result){
// result === 'promise 2' because it was resolved before promise1
// was resolved.
});
```
`RSVP.Promise.race` is deterministic in that only the state of the first
settled promise matters. For example, even if other promises given to the
`promises` array argument are resolved, but the first settled promise has
become rejected before the other promises became fulfilled, the returned
promise will become rejected:
```javascript
let promise1 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
resolve('promise 1');
}, 200);
});
let promise2 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
reject(new Error('promise 2'));
}, 100);
});
RSVP.Promise.race([promise1, promise2]).then(function(result){
// Code here never runs
}, function(reason){
// reason.message === 'promise 2' because promise 2 became rejected before
// promise 1 became fulfilled
});
```
An example real-world use case is implementing timeouts:
```javascript
RSVP.Promise.race([ajax('foo.json'), timeout(5000)])
```
@method race
@static
@param {Array} entries array of promises to observe
@param {String} label optional string for describing the promise returned.
Useful for tooling.
@return {Promise} a promise which settles in the same way as the first passed
promise to settle.
*/
function race(entries, label) {
/*jshint validthis:true */
var Constructor = this;
var promise = new Constructor(noop, label);
if (!isArray(entries)) {
reject(promise, new TypeError('Promise.race must be called with an array'));
return promise;
}
for (var i = 0; promise._state === PENDING && i < entries.length; i++) {
subscribe(Constructor.resolve(entries[i]), undefined, function (value) {
return resolve(promise, value);
}, function (reason) {
return reject(promise, reason);
});
}
return promise;
}
/**
`RSVP.Promise.reject` returns a promise rejected with the passed `reason`.
It is shorthand for the following:
```javascript
let promise = new RSVP.Promise(function(resolve, reject){
reject(new Error('WHOOPS'));
});
promise.then(function(value){
// Code here doesn't run because the promise is rejected!
}, function(reason){
// reason.message === 'WHOOPS'
});
```
Instead of writing the above, your code now simply becomes the following:
```javascript
let promise = RSVP.Promise.reject(new Error('WHOOPS'));
promise.then(function(value){
// Code here doesn't run because the promise is rejected!
}, function(reason){
// reason.message === 'WHOOPS'
});
```
@method reject
@static
@param {*} reason value that the returned promise will be rejected with.
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise rejected with the given `reason`.
*/
function reject$1(reason, label) {
/*jshint validthis:true */
var Constructor = this;
var promise = new Constructor(noop, label);
reject(promise, reason);
return promise;
}
var guidKey = 'rsvp_' + now() + '-';
var counter = 0;
function needsResolver() {
throw new TypeError('You must pass a resolver function as the first argument to the promise constructor');
}
function needsNew() {
throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.");
}
/**
Promise objects represent the eventual result of an asynchronous operation. The
primary way of interacting with a promise is through its `then` method, which
registers callbacks to receive either a promise’s eventual value or the reason
why the promise cannot be fulfilled.
Terminology
-----------
- `promise` is an object or function with a `then` method whose behavior conforms to this specification.
- `thenable` is an object or function that defines a `then` method.
- `value` is any legal JavaScript value (including undefined, a thenable, or a promise).
- `exception` is a value that is thrown using the throw statement.
- `reason` is a value that indicates why a promise was rejected.
- `settled` the final resting state of a promise, fulfilled or rejected.
A promise can be in one of three states: pending, fulfilled, or rejected.
Promises that are fulfilled have a fulfillment value and are in the fulfilled
state. Promises that are rejected have a rejection reason and are in the
rejected state. A fulfillment value is never a thenable.
Promises can also be said to *resolve* a value. If this value is also a
promise, then the original promise's settled state will match the value's
settled state. So a promise that *resolves* a promise that rejects will
itself reject, and a promise that *resolves* a promise that fulfills will
itself fulfill.
Basic Usage:
------------
```js
let promise = new Promise(function(resolve, reject) {
// on success
resolve(value);
// on failure
reject(reason);
});
promise.then(function(value) {
// on fulfillment
}, function(reason) {
// on rejection
});
```
Advanced Usage:
---------------
Promises shine when abstracting away asynchronous interactions such as
`XMLHttpRequest`s.
```js
function getJSON(url) {
return new Promise(function(resolve, reject){
let xhr = new XMLHttpRequest();
xhr.open('GET', url);
xhr.onreadystatechange = handler;
xhr.responseType = 'json';
xhr.setRequestHeader('Accept', 'application/json');
xhr.send();
function handler() {
if (this.readyState === this.DONE) {
if (this.status === 200) {
resolve(this.response);
} else {
reject(new Error('getJSON: `' + url + '` failed with status: [' + this.status + ']'));
}
}
};
});
}
getJSON('/posts.json').then(function(json) {
// on fulfillment
}, function(reason) {
// on rejection
});
```
Unlike callbacks, promises are great composable primitives.
```js
Promise.all([
getJSON('/posts'),
getJSON('/comments')
]).then(function(values){
values[0] // => postsJSON
values[1] // => commentsJSON
return values;
});
```
@class RSVP.Promise
@param {function} resolver
@param {String} label optional string for labeling the promise.
Useful for tooling.
@constructor
*/
var Promise = function () {
function Promise(resolver, label) {
this._id = counter++;
this._label = label;
this._state = undefined;
this._result = undefined;
this._subscribers = [];
config.instrument && instrument('created', this);
if (noop !== resolver) {
typeof resolver !== 'function' && needsResolver();
this instanceof Promise ? initializePromise(this, resolver) : needsNew();
}
}
Promise.prototype._onError = function _onError(reason) {
var _this = this;
config.after(function () {
if (_this._onError) {
config.trigger('error', reason, _this._label);
}
});
};
/**
`catch` is simply sugar for `then(undefined, onRejection)` which makes it the same
as the catch block of a try/catch statement.
```js
function findAuthor(){
throw new Error('couldn\'t find that author');
}
// synchronous
try {
findAuthor();
} catch(reason) {
// something went wrong
}
// async with promises
findAuthor().catch(function(reason){
// something went wrong
});
```
@method catch
@param {Function} onRejection
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise}
*/
Promise.prototype.catch = function _catch(onRejection, label) {
return this.then(undefined, onRejection, label);
};
/**
`finally` will be invoked regardless of the promise's fate just as native
try/catch/finally behaves
Synchronous example:
```js
function findAuthor() {
if (Math.random() > 0.5) {
throw new Error();
}
return new Author();
}
try {
return findAuthor(); // succeed or fail
} catch(error) {
return findOtherAuthor();
} finally {
// always runs
// doesn't affect the return value
}
```
Asynchronous example:
```js
findAuthor().catch(function(reason){
return findOtherAuthor();
}).finally(function(){
// author was either found, or not
});
```
@method finally
@param {Function} callback
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise}
*/
Promise.prototype.finally = function _finally(callback, label) {
var promise = this;
var constructor = promise.constructor;
return promise.then(function (value) {
return constructor.resolve(callback()).then(function () {
return value;
});
}, function (reason) {
return constructor.resolve(callback()).then(function () {
throw reason;
});
}, label);
};
return Promise;
}();
Promise.cast = resolve$1; // deprecated
Promise.all = all;
Promise.race = race;
Promise.resolve = resolve$1;
Promise.reject = reject$1;
Promise.prototype._guidKey = guidKey;
/**
The primary way of interacting with a promise is through its `then` method,
which registers callbacks to receive either a promise's eventual value or the
reason why the promise cannot be fulfilled.
```js
findUser().then(function(user){
// user is available
}, function(reason){
// user is unavailable, and you are given the reason why
});
```
Chaining
--------
The return value of `then` is itself a promise. This second, 'downstream'
promise is resolved with the return value of the first promise's fulfillment
or rejection handler, or rejected if the handler throws an exception.
```js
findUser().then(function (user) {
return user.name;
}, function (reason) {
return 'default name';
}).then(function (userName) {
// If `findUser` fulfilled, `userName` will be the user's name, otherwise it
// will be `'default name'`
});
findUser().then(function (user) {
throw new Error('Found user, but still unhappy');
}, function (reason) {
throw new Error('`findUser` rejected and we\'re unhappy');
}).then(function (value) {
// never reached
}, function (reason) {
// if `findUser` fulfilled, `reason` will be 'Found user, but still unhappy'.
// If `findUser` rejected, `reason` will be '`findUser` rejected and we\'re unhappy'.
});
```
If the downstream promise does not specify a rejection handler, rejection reasons will be propagated further downstream.
```js
findUser().then(function (user) {
throw new PedagogicalException('Upstream error');
}).then(function (value) {
// never reached
}).then(function (value) {
// never reached
}, function (reason) {
// The `PedagogicalException` is propagated all the way down to here
});
```
Assimilation
------------
Sometimes the value you want to propagate to a downstream promise can only be
retrieved asynchronously. This can be achieved by returning a promise in the
fulfillment or rejection handler. The downstream promise will then be pending
until the returned promise is settled. This is called *assimilation*.
```js
findUser().then(function (user) {
return findCommentsByAuthor(user);
}).then(function (comments) {
// The user's comments are now available
});
```
If the assimilated promise rejects, then the downstream promise will also reject.
```js
findUser().then(function (user) {
return findCommentsByAuthor(user);
}).then(function (comments) {
// If `findCommentsByAuthor` fulfills, we'll have the value here
}, function (reason) {
// If `findCommentsByAuthor` rejects, we'll have the reason here
});
```
Simple Example
--------------
Synchronous Example
```javascript
let result;
try {
result = findResult();
// success
} catch(reason) {
// failure
}
```
Errback Example
```js
findResult(function(result, err){
if (err) {
// failure
} else {
// success
}
});
```
Promise Example:
```javascript
findResult().then(function(result){
// success
}, function(reason){
// failure
});
```
Advanced Example
--------------
Synchronous Example
```javascript
let author, books;
try {
author = findAuthor();
books = findBooksByAuthor(author);
// success
} catch(reason) {
// failure
}
```
Errback Example
```js
function foundBooks(books) {
}
function failure(reason) {
}
findAuthor(function(author, err){
if (err) {
failure(err);
// failure
} else {
try {
findBooksByAuthor(author, function(books, err) {
if (err) {
failure(err);
} else {
try {
foundBooks(books);
} catch(reason) {
failure(reason);
}
}
});
} catch(error) {
failure(error);
}
// success
}
});
```
Promise Example:
```javascript
findAuthor().
then(findBooksByAuthor).
then(function(books){
// found books
}).catch(function(reason){
// something went wrong
});
```
@method then
@param {Function} onFulfillment
@param {Function} onRejection
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise}
*/
Promise.prototype.then = then;
function Result() {
this.value = undefined;
}
var ERROR = new Result();
var GET_THEN_ERROR$1 = new Result();
function getThen$1(obj) {
try {
return obj.then;
} catch (error) {
GET_THEN_ERROR$1.value = error;
return GET_THEN_ERROR$1;
}
}
function tryApply(f, s, a) {
try {
f.apply(s, a);
} catch (error) {
ERROR.value = error;
return ERROR;
}
}
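// `Result` plus the shared `ERROR` / `GET_THEN_ERROR$1` instances implement a
// sentinel pattern: instead of re-throwing on hot paths, `getThen$1` and
// `tryApply` stash the caught error on the sentinel's `.value` and return the
// sentinel itself, so callers can detect failure with a cheap identity check
// (e.g. `result === ERROR`) rather than wrapping every call in try/catch.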
function makeObject(_, argumentNames) {
var obj = {};
var length = _.length;
var args = new Array(length);
for (var x = 0; x < length; x++) {
args[x] = _[x];
}
for (var i = 0; i < argumentNames.length; i++) {
var name = argumentNames[i];
obj[name] = args[i + 1];
}
return obj;
}
function arrayResult(_) {
var length = _.length;
var args = new Array(length - 1);
for (var i = 1; i < length; i++) {
args[i - 1] = _[i];
}
return args;
}
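/**
  A hedged sketch of the two helpers above, as `denodeify` applies them to a
  node-style callback's `arguments` (error first, success values after):
  ```javascript
  arrayResult([null, 'res', 'body']);                  // => ['res', 'body']
  makeObject([null, 'res', 'body'], ['res', 'body']);  // => { res: 'res', body: 'body' }
  ```
*/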
function wrapThenable(then, promise) {
return {
then: function (onFulFillment, onRejection) {
return then.call(promise, onFulFillment, onRejection);
}
};
}
/**
`RSVP.denodeify` takes a 'node-style' function and returns a function that
will return an `RSVP.Promise`. You can use `denodeify` in Node.js or the
browser when you'd prefer to use promises over using callbacks. For example,
`denodeify` transforms the following:
```javascript
let fs = require('fs');
fs.readFile('myfile.txt', function(err, data){
if (err) return handleError(err);
handleData(data);
});
```
into:
```javascript
let fs = require('fs');
let readFile = RSVP.denodeify(fs.readFile);
readFile('myfile.txt').then(handleData, handleError);
```
If the node function has multiple success parameters, then `denodeify`
just returns the first one:
```javascript
let request = RSVP.denodeify(require('request'));
request('http://example.com').then(function(res) {
// ...
});
```
However, if you need all success parameters, setting `denodeify`'s
second parameter to `true` causes it to return all success parameters
as an array:
```javascript
let request = RSVP.denodeify(require('request'), true);
request('http://example.com').then(function(result) {
// result[0] -> res
// result[1] -> body
});
```
Or if you pass it an array with names it returns the parameters as a hash:
```javascript
let request = RSVP.denodeify(require('request'), ['res', 'body']);
request('http://example.com').then(function(result) {
// result.res
// result.body
});
```
Sometimes you need to retain the `this`:
```javascript
let app = require('express')();
let render = RSVP.denodeify(app.render.bind(app));
```
The denodified function inherits from the original function. It works in all
environments, except IE 10 and below. Consequently all properties of the original
function are available to you. However, any properties you change on the
denodeified function won't be changed on the original function. Example:
```javascript
let request = RSVP.denodeify(require('request')),
cookieJar = request.jar(); // <- Inheritance is used here
request('http://example.com', {jar: cookieJar}).then(function(res) {
// cookieJar.cookies holds now the cookies returned by example.com
});
```
Using `denodeify` makes it easier to compose asynchronous operations instead
of using callbacks. For example, instead of:
```javascript
let fs = require('fs');
fs.readFile('myfile.txt', function(err, data){
if (err) { ... } // Handle error
fs.writeFile('myfile2.txt', data, function(err){
if (err) { ... } // Handle error
console.log('done')
});
});
```
you can chain the operations together using `then` from the returned promise:
```javascript
let fs = require('fs');
let readFile = RSVP.denodeify(fs.readFile);
let writeFile = RSVP.denodeify(fs.writeFile);
readFile('myfile.txt').then(function(data){
return writeFile('myfile2.txt', data);
}).then(function(){
console.log('done')
}).catch(function(error){
// Handle error
});
```
@method denodeify
@static
@for RSVP
@param {Function} nodeFunc a 'node-style' function that takes a callback as
its last argument. The callback expects an error to be passed as its first
argument (if an error occurred, otherwise null), and the value from the
operation as its second argument ('function(err, value){ }').
@param {Boolean|Array} [options] An optional parameter that if set
to `true` causes the promise to fulfill with the callback's success arguments
as an array. This is useful if the node function has multiple success
parameters. If you set this parameter to an array with names, the promise will
fulfill with a hash with these names as keys and the success parameters as
values.
@return {Function} a function that wraps `nodeFunc` to return an
`RSVP.Promise`
@static
*/
function denodeify(nodeFunc, options) {
var fn = function () {
var self = this;
var l = arguments.length;
var args = new Array(l + 1);
var promiseInput = false;
for (var i = 0; i < l; ++i) {
var arg = arguments[i];
if (!promiseInput) {
// TODO: clean this up
promiseInput = needsPromiseInput(arg);
if (promiseInput === GET_THEN_ERROR$1) {
var p = new Promise(noop);
reject(p, GET_THEN_ERROR$1.value);
return p;
} else if (promiseInput && promiseInput !== true) {
arg = wrapThenable(promiseInput, arg);
}
}
args[i] = arg;
}
var promise = new Promise(noop);
args[l] = function (err, val) {
if (err) reject(promise, err);
else if (options === undefined) resolve(promise, val);
else if (options === true) resolve(promise, arrayResult(arguments));
else if (isArray(options)) resolve(promise, makeObject(arguments, options));
else resolve(promise, val);
};
if (promiseInput) {
return handlePromiseInput(promise, args, nodeFunc, self);
} else {
return handleValueInput(promise, args, nodeFunc, self);
}
};
fn.__proto__ = nodeFunc;
return fn;
}
function handleValueInput(promise, args, nodeFunc, self) {
var result = tryApply(nodeFunc, self, args);
if (result === ERROR) {
reject(promise, result.value);
}
return promise;
}
function handlePromiseInput(promise, args, nodeFunc, self) {
return Promise.all(args).then(function (args) {
var result = tryApply(nodeFunc, self, args);
if (result === ERROR) {
reject(promise, result.value);
}
return promise;
});
}
function needsPromiseInput(arg) {
if (arg && typeof arg === 'object') {
if (arg.constructor === Promise) {
return true;
} else {
return getThen$1(arg);
}
} else {
return false;
}
}
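/**
  A hedged sketch of the promise-input path above: if any argument passed to a
  denodeified function is thenable, `denodeify` routes through
  `handlePromiseInput`, settling the arguments with `Promise.all` before the
  wrapped node function runs. `fs` here is only an illustrative assumption:
  ```javascript
  let readFile = denodeify(fs.readFile);
  let writeFile = denodeify(fs.writeFile);
  writeFile('copy.txt', readFile('original.txt')); // the read settles first
  ```
*/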
/**
This is a convenient alias for `RSVP.Promise.all`.
@method all
@static
@for RSVP
@param {Array} array Array of promises.
@param {String} label An optional label. This is useful
for tooling.
*/
function all$1(array, label) {
return Promise.all(array, label);
}
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var AllSettled = function (_Enumerator) {
_inherits(AllSettled, _Enumerator);
function AllSettled(Constructor, entries, label) {
return _possibleConstructorReturn(this, _Enumerator.call(this, Constructor, entries, false /* don't abort on reject */, label));
}
return AllSettled;
}(Enumerator);
AllSettled.prototype._makeResult = makeSettledResult;
/**
`RSVP.allSettled` is similar to `RSVP.all`, but instead of implementing
a fail-fast method, it waits until all the promises have settled and
shows you all the results. This is useful if you want to handle multiple
promises' failure states together as a set.
Returns a promise that is fulfilled when all the given promises have been
settled. The returned promise is fulfilled with an array of the states of
the promises passed into the `promises` array argument.
Each state object will either indicate fulfillment or rejection, and
provide the corresponding value or reason. The states will take one of
the following formats:
```javascript
{ state: 'fulfilled', value: value }
or
{ state: 'rejected', reason: reason }
```
Example:
```javascript
let promise1 = RSVP.Promise.resolve(1);
let promise2 = RSVP.Promise.reject(new Error('2'));
let promise3 = RSVP.Promise.reject(new Error('3'));
let promises = [ promise1, promise2, promise3 ];
RSVP.allSettled(promises).then(function(array){
// array == [
// { state: 'fulfilled', value: 1 },
// { state: 'rejected', reason: Error },
// { state: 'rejected', reason: Error }
// ]
// Note that for the second item, reason.message will be '2', and for the
// third item, reason.message will be '3'.
}, function(error) {
// Not run. (This block would only be called if allSettled had failed,
// for instance if passed an incorrect argument type.)
});
```
@method allSettled
@static
@for RSVP
@param {Array} entries
@param {String} label - optional string that describes the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled with an array of the settled
states of the constituent promises.
*/
function allSettled(entries, label) {
if (!isArray(entries)) {
return Promise.reject(new TypeError("Promise.allSettled must be called with an array"), label);
}
return new AllSettled(Promise, entries, label).promise;
}
/**
This is a convenient alias for `RSVP.Promise.race`.
@method race
@static
@for RSVP
@param {Array} array Array of promises.
@param {String} label An optional label. This is useful
for tooling.
*/
function race$1(array, label) {
return Promise.race(array, label);
}
function _possibleConstructorReturn$1(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits$1(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var hasOwnProperty = Object.prototype.hasOwnProperty;
var PromiseHash = function (_Enumerator) {
_inherits$1(PromiseHash, _Enumerator);
function PromiseHash(Constructor, object) {
var abortOnReject = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : true;
var label = arguments[3];
return _possibleConstructorReturn$1(this, _Enumerator.call(this, Constructor, object, abortOnReject, label));
}
PromiseHash.prototype._init = function _init(Constructor, object) {
this._result = {};
this._enumerate(object);
if (this._remaining === 0) {
fulfill(this.promise, this._result);
}
};
PromiseHash.prototype._enumerate = function _enumerate(input) {
var promise = this.promise;
var results = [];
for (var key in input) {
if (hasOwnProperty.call(input, key)) {
results.push({
position: key,
entry: input[key]
});
}
}
var length = results.length;
this._remaining = length;
var result = void 0;
for (var i = 0; promise._state === PENDING && i < length; i++) {
result = results[i];
this._eachEntry(result.entry, result.position);
}
};
return PromiseHash;
}(Enumerator);
/**
`RSVP.hash` is similar to `RSVP.all`, but takes an object instead of an array
for its `promises` argument.
Returns a promise that is fulfilled when all the given promises have been
fulfilled, or rejected if any of them become rejected. The returned promise
is fulfilled with a hash that has the same key names as the `promises` object
argument. If any of the values in the object are not promises, they will
simply be copied over to the fulfilled object.
Example:
```javascript
let promises = {
myPromise: RSVP.resolve(1),
yourPromise: RSVP.resolve(2),
theirPromise: RSVP.resolve(3),
notAPromise: 4
};
RSVP.hash(promises).then(function(hash){
// hash here is an object that looks like:
// {
// myPromise: 1,
// yourPromise: 2,
// theirPromise: 3,
// notAPromise: 4
// }
});
```
If any of the `promises` given to `RSVP.hash` are rejected, the first promise
that is rejected will be given as the reason to the rejection handler.
Example:
```javascript
let promises = {
myPromise: RSVP.resolve(1),
rejectedPromise: RSVP.reject(new Error('rejectedPromise')),
anotherRejectedPromise: RSVP.reject(new Error('anotherRejectedPromise')),
};
RSVP.hash(promises).then(function(hash){
// Code here never runs because there are rejected promises!
}, function(reason) {
// reason.message === 'rejectedPromise'
});
```
An important note: `RSVP.hash` is intended for plain JavaScript objects that
are just a set of keys and values. `RSVP.hash` will NOT preserve prototype
chains.
Example:
```javascript
function MyConstructor(){
this.example = RSVP.resolve('Example');
}
MyConstructor.prototype = {
protoProperty: RSVP.resolve('Proto Property')
};
let myObject = new MyConstructor();
RSVP.hash(myObject).then(function(hash){
// protoProperty will not be present, instead you will just have an
// object that looks like:
// {
// example: 'Example'
// }
//
// hash.hasOwnProperty('protoProperty'); // false
// 'undefined' === typeof hash.protoProperty
});
```
@method hash
@static
@for RSVP
@param {Object} object
@param {String} label optional string that describes the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled when all properties of `promises`
have been fulfilled, or rejected if any of them become rejected.
*/
function hash(object, label) {
if (!isObject(object)) {
return Promise.reject(new TypeError("Promise.hash must be called with an object"), label);
}
return new PromiseHash(Promise, object, label).promise;
}
function _possibleConstructorReturn$2(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits$2(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var HashSettled = function (_PromiseHash) {
_inherits$2(HashSettled, _PromiseHash);
function HashSettled(Constructor, object, label) {
return _possibleConstructorReturn$2(this, _PromiseHash.call(this, Constructor, object, false, label));
}
return HashSettled;
}(PromiseHash);
HashSettled.prototype._makeResult = makeSettledResult;
/**
`RSVP.hashSettled` is similar to `RSVP.allSettled`, but takes an object
instead of an array for its `promises` argument.
Unlike `RSVP.all` or `RSVP.hash`, which implement a fail-fast method,
but like `RSVP.allSettled`, `hashSettled` waits until all the
constituent promises have settled and then shows you all the results
with their states and values/reasons. This is useful if you want to
handle multiple promises' failure states together as a set.
Returns a promise that is fulfilled when all the given promises have been
settled, or rejected if the passed parameters are invalid.
The returned promise is fulfilled with a hash that has the same key names as
the `promises` object argument. If any of the values in the object are not
promises, they will be copied over to the fulfilled object and marked with state
'fulfilled'.
Example:
```javascript
let promises = {
myPromise: RSVP.Promise.resolve(1),
yourPromise: RSVP.Promise.resolve(2),
theirPromise: RSVP.Promise.resolve(3),
notAPromise: 4
};
RSVP.hashSettled(promises).then(function(hash){
// hash here is an object that looks like:
// {
// myPromise: { state: 'fulfilled', value: 1 },
// yourPromise: { state: 'fulfilled', value: 2 },
// theirPromise: { state: 'fulfilled', value: 3 },
// notAPromise: { state: 'fulfilled', value: 4 }
// }
});
```
If any of the `promises` given to `RSVP.hashSettled` are rejected, the state will
be set to 'rejected' and the reason for rejection provided.
Example:
```javascript
let promises = {
myPromise: RSVP.Promise.resolve(1),
rejectedPromise: RSVP.Promise.reject(new Error('rejection')),
anotherRejectedPromise: RSVP.Promise.reject(new Error('more rejection')),
};
RSVP.hashSettled(promises).then(function(hash){
// hash here is an object that looks like:
// {
// myPromise: { state: 'fulfilled', value: 1 },
// rejectedPromise: { state: 'rejected', reason: Error },
// anotherRejectedPromise: { state: 'rejected', reason: Error },
// }
// Note that for rejectedPromise, reason.message == 'rejection',
// and for anotherRejectedPromise, reason.message == 'more rejection'.
});
```
An important note: `RSVP.hashSettled` is intended for plain JavaScript objects that
are just a set of keys and values. `RSVP.hashSettled` will NOT preserve prototype
chains.
Example:
```javascript
function MyConstructor(){
this.example = RSVP.Promise.resolve('Example');
}
MyConstructor.prototype = {
protoProperty: RSVP.Promise.resolve('Proto Property')
};
let myObject = new MyConstructor();
RSVP.hashSettled(myObject).then(function(hash){
// protoProperty will not be present, instead you will just have an
// object that looks like:
// {
// example: { state: 'fulfilled', value: 'Example' }
// }
//
// hash.hasOwnProperty('protoProperty'); // false
// 'undefined' === typeof hash.protoProperty
});
```
@method hashSettled
@for RSVP
@param {Object} object
@param {String} label optional string that describes the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled when all properties of `promises`
have been settled.
@static
*/
function hashSettled(object, label) {
if (!isObject(object)) {
return Promise.reject(new TypeError("RSVP.hashSettled must be called with an object"), label);
}
return new HashSettled(Promise, object, label).promise;
}
/**
`RSVP.rethrow` will rethrow an error on the next turn of the JavaScript event
loop in order to aid debugging.
Promises A+ specifies that any exceptions that occur with a promise must be
caught by the promises implementation and bubbled to the last handler. For
this reason, it is recommended that you always specify a second rejection
handler function to `then`. However, `RSVP.rethrow` will throw the exception
outside of the promise, so it bubbles up to your console in the browser, or
surfaces as an uncaught exception (or domain error) in Node. `rethrow` will
also throw the error again so the error can be handled by the promise per the spec.
```javascript
function throws(){
throw new Error('Whoops!');
}
let promise = new RSVP.Promise(function(resolve, reject){
throws();
});
promise.catch(RSVP.rethrow).then(function(){
// Code here doesn't run because the promise became rejected due to an
// error!
}, function (err){
// handle the error here
});
```
The 'Whoops' error will be thrown on the next turn of the event loop
and you can watch for it in your console. You can also handle it using a
rejection handler given to `.then` or `.catch` on the returned promise.
@method rethrow
@static
@for RSVP
@param {Error} reason reason the promise became rejected.
@throws Error
@static
*/
function rethrow(reason) {
setTimeout(function () {
throw reason;
});
throw reason;
}
/**
`RSVP.defer` returns an object similar to jQuery's `$.Deferred`.
`RSVP.defer` should be used when porting over code reliant on `$.Deferred`'s
interface. New code should use the `RSVP.Promise` constructor instead.
The object returned from `RSVP.defer` is a plain object with three properties:
* promise - an `RSVP.Promise`.
* reject - a function that causes the `promise` property on this object to
become rejected
* resolve - a function that causes the `promise` property on this object to
become fulfilled.
Example:
```javascript
let deferred = RSVP.defer();
deferred.resolve("Success!");
deferred.promise.then(function(value){
// value here is "Success!"
});
```
@method defer
@static
@for RSVP
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Object}
*/
function defer(label) {
var deferred = { resolve: undefined, reject: undefined };
deferred.promise = new Promise(function (resolve, reject) {
deferred.resolve = resolve;
deferred.reject = reject;
}, label);
return deferred;
}
/**
`RSVP.map` is similar to JavaScript's native `map` method, except that it
waits for all promises to become fulfilled before running the `mapFn` on
each item given to `promises`. `RSVP.map` returns a promise that will
become fulfilled with the result of running `mapFn` on the values the promises
become fulfilled with.
For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.resolve(2);
let promise3 = RSVP.resolve(3);
let promises = [ promise1, promise2, promise3 ];
let mapFn = function(item){
return item + 1;
};
RSVP.map(promises, mapFn).then(function(result){
// result is [ 2, 3, 4 ]
});
```
If any of the `promises` given to `RSVP.map` are rejected, the first promise
that is rejected will be given as an argument to the returned promise's
rejection handler. For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.reject(new Error('2'));
let promise3 = RSVP.reject(new Error('3'));
let promises = [ promise1, promise2, promise3 ];
let mapFn = function(item){
return item + 1;
};
RSVP.map(promises, mapFn).then(function(array){
// Code here never runs because there are rejected promises!
}, function(reason) {
// reason.message === '2'
});
```
`RSVP.map` will also wait if a promise is returned from `mapFn`. For example,
say you want to get all comments from a set of blog posts, but you need
the blog posts first because they contain a url to those comments.
```javascript
let mapFn = function(blogPost){
// getComments does some ajax and returns an RSVP.Promise that is fulfilled
// with some comments data
return getComments(blogPost.comments_url);
};
// getBlogPosts does some ajax and returns an RSVP.Promise that is fulfilled
// with some blog post data
RSVP.map(getBlogPosts(), mapFn).then(function(comments){
// comments is the result of asking the server for the comments
// of all blog posts returned from getBlogPosts()
});
```
@method map
@static
@for RSVP
@param {Array} promises
@param {Function} mapFn function to be called on each fulfilled promise.
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled with the result of calling
`mapFn` on each fulfilled promise or value when they become fulfilled.
The promise will be rejected if any of the given `promises` become rejected.
@static
*/
function map(promises, mapFn, label) {
if (!isArray(promises)) {
return Promise.reject(new TypeError("RSVP.map must be called with an array"), label);
}
if (!isFunction(mapFn)) {
return Promise.reject(new TypeError("RSVP.map expects a function as a second argument"), label);
}
return Promise.all(promises, label).then(function (values) {
var length = values.length;
var results = new Array(length);
for (var i = 0; i < length; i++) {
results[i] = mapFn(values[i]);
}
return Promise.all(results, label);
});
}
/**
This is a convenient alias for `RSVP.Promise.resolve`.
@method resolve
@static
@for RSVP
@param {*} value value that the returned promise will be resolved with
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise that will become fulfilled with the given
`value`
*/
function resolve$2(value, label) {
return Promise.resolve(value, label);
}
/**
This is a convenient alias for `RSVP.Promise.reject`.
@method reject
@static
@for RSVP
@param {*} reason value that the returned promise will be rejected with.
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise rejected with the given `reason`.
*/
function reject$2(reason, label) {
return Promise.reject(reason, label);
}
/**
`RSVP.filter` is similar to JavaScript's native `filter` method, except that it
waits for all promises to become fulfilled before running the `filterFn` on
each item given to `promises`. `RSVP.filter` returns a promise that will
become fulfilled with the result of running `filterFn` on the values the
promises become fulfilled with.
For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.resolve(2);
let promise3 = RSVP.resolve(3);
let promises = [promise1, promise2, promise3];
let filterFn = function(item){
return item > 1;
};
RSVP.filter(promises, filterFn).then(function(result){
// result is [ 2, 3 ]
});
```
If any of the `promises` given to `RSVP.filter` are rejected, the first promise
that is rejected will be given as an argument to the returned promise's
rejection handler. For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.reject(new Error('2'));
let promise3 = RSVP.reject(new Error('3'));
let promises = [ promise1, promise2, promise3 ];
let filterFn = function(item){
return item > 1;
};
RSVP.filter(promises, filterFn).then(function(array){
// Code here never runs because there are rejected promises!
}, function(reason) {
// reason.message === '2'
});
```
`RSVP.filter` will also wait for any promises returned from `filterFn`.
For instance, you may want to fetch a list of users then return a subset
of those users based on some asynchronous operation:
```javascript
let alice = { name: 'alice' };
let bob = { name: 'bob' };
let users = [ alice, bob ];
let promises = users.map(function(user){
return RSVP.resolve(user);
});
let filterFn = function(user){
// Here, Alice has permissions to create a blog post, but Bob does not.
return getPrivilegesForUser(user).then(function(privs){
return privs.can_create_blog_post === true;
});
};
RSVP.filter(promises, filterFn).then(function(users){
// true, because the server told us only Alice can create a blog post.
users.length === 1;
// false, because Alice is the only user present in `users`
users[0] === bob;
});
```
@method filter
@static
@for RSVP
@param {Array} promises
@param {Function} filterFn - function to be called on each resolved value to
filter the final results.
@param {String} label optional string describing the promise. Useful for
tooling.
@return {Promise}
*/
function resolveAll(promises, label) {
return Promise.all(promises, label);
}
function resolveSingle(promise, label) {
return Promise.resolve(promise, label).then(function (promises) {
return resolveAll(promises, label);
});
}
function filter(promises, filterFn, label) {
if (!isArray(promises) && !(isObject(promises) && promises.then !== undefined)) {
return Promise.reject(new TypeError("RSVP.filter must be called with an array or promise"), label);
}
if (!isFunction(filterFn)) {
return Promise.reject(new TypeError("RSVP.filter expects function as a second argument"), label);
}
var promise = isArray(promises) ? resolveAll(promises, label) : resolveSingle(promises, label);
return promise.then(function (values) {
var length = values.length;
var filtered = new Array(length);
for (var i = 0; i < length; i++) {
filtered[i] = filterFn(values[i]);
}
return resolveAll(filtered, label).then(function (filtered) {
var results = new Array(length);
var newLength = 0;
for (var _i = 0; _i < length; _i++) {
if (filtered[_i]) {
results[newLength] = values[_i];
newLength++;
}
}
results.length = newLength;
return results;
});
});
}
var len = 0;
var vertxNext = void 0;
function asap(callback, arg) {
queue$1[len] = callback;
queue$1[len + 1] = arg;
len += 2;
if (len === 2) {
// If len is 2, that means that we need to schedule an async flush.
// If additional callbacks are queued before the queue is flushed, they
// will be processed by this flush that we are scheduling.
scheduleFlush$1();
}
}
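/*
	Illustrative sketch (kept as a comment so it does not run as part of this
	bundle): `asap` batches callbacks, and the first enqueue schedules a single
	asynchronous flush that drains everything queued before the flush fires.

	asap(function (arg) { console.log("first", arg); }, 1);
	asap(function (arg) { console.log("second", arg); }, 2); // same flush as above
*/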
var browserWindow = typeof window !== 'undefined' ? window : undefined;
var browserGlobal = browserWindow || {};
var BrowserMutationObserver = browserGlobal.MutationObserver || browserGlobal.WebKitMutationObserver;
var isNode = typeof self === 'undefined' && typeof process !== 'undefined' && {}.toString.call(process) === '[object process]';
// test for web worker but not in IE10
var isWorker = typeof Uint8ClampedArray !== 'undefined' && typeof importScripts !== 'undefined' && typeof MessageChannel !== 'undefined';
// node
function useNextTick() {
var nextTick = process.nextTick;
// node version 0.10.x displays a deprecation warning when nextTick is used recursively
// setImmediate should be used instead
var version = process.versions.node.match(/^(?:(\d+)\.)?(?:(\d+)\.)?(\*|\d+)$/);
if (Array.isArray(version) && version[1] === '0' && version[2] === '10') {
nextTick = setImmediate;
}
return function () {
return nextTick(flush);
};
}
// vertx
function useVertxTimer() {
if (typeof vertxNext !== 'undefined') {
return function () {
vertxNext(flush);
};
}
return useSetTimeout();
}
function useMutationObserver() {
var iterations = 0;
var observer = new BrowserMutationObserver(flush);
var node = document.createTextNode('');
observer.observe(node, { characterData: true });
return function () {
return node.data = iterations = ++iterations % 2;
};
}
// web worker
function useMessageChannel() {
var channel = new MessageChannel();
channel.port1.onmessage = flush;
return function () {
return channel.port2.postMessage(0);
};
}
function useSetTimeout() {
return function () {
return setTimeout(flush, 1);
};
}
var queue$1 = new Array(1000);
function flush() {
for (var i = 0; i < len; i += 2) {
var callback = queue$1[i];
var arg = queue$1[i + 1];
callback(arg);
queue$1[i] = undefined;
queue$1[i + 1] = undefined;
}
len = 0;
}
function attemptVertx() {
try {
var r = require;
var vertx = r('vertx');
vertxNext = vertx.runOnLoop || vertx.runOnContext;
return useVertxTimer();
} catch (e) {
return useSetTimeout();
}
}
var scheduleFlush$1 = void 0;
// Decide which async method to use to trigger processing of queued callbacks:
if (isNode) {
scheduleFlush$1 = useNextTick();
} else if (BrowserMutationObserver) {
scheduleFlush$1 = useMutationObserver();
} else if (isWorker) {
scheduleFlush$1 = useMessageChannel();
} else if (browserWindow === undefined && typeof require === 'function') {
scheduleFlush$1 = attemptVertx();
} else {
scheduleFlush$1 = useSetTimeout();
}
var platform = void 0;
/* global self */
if (typeof self === 'object') {
platform = self;
/* global global */
} else if (typeof global === 'object') {
platform = global;
} else {
throw new Error('no global: `self` or `global` found');
}
var _asap$cast$Promise$Ev;
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }
// defaults
config.async = asap;
config.after = function (cb) {
return setTimeout(cb, 0);
};
var cast = resolve$2;
var async = function (callback, arg) {
return config.async(callback, arg);
};
function on() {
config['on'].apply(config, arguments);
}
function off() {
config['off'].apply(config, arguments);
}
// Set up instrumentation through `window.__PROMISE_INSTRUMENTATION__`
if (typeof window !== 'undefined' && typeof window['__PROMISE_INSTRUMENTATION__'] === 'object') {
var callbacks = window['__PROMISE_INSTRUMENTATION__'];
configure('instrument', true);
for (var eventName in callbacks) {
if (callbacks.hasOwnProperty(eventName)) {
on(eventName, callbacks[eventName]);
}
}
}
// the default export here is for backwards compat:
// https://github.com/tildeio/rsvp.js/issues/434
var rsvp = (_asap$cast$Promise$Ev = {
asap: asap,
cast: cast,
Promise: Promise,
EventTarget: EventTarget,
all: all$1,
allSettled: allSettled,
race: race$1,
hash: hash,
hashSettled: hashSettled,
rethrow: rethrow,
defer: defer,
denodeify: denodeify,
configure: configure,
on: on,
off: off,
resolve: resolve$2,
reject: reject$2,
map: map
}, _defineProperty(_asap$cast$Promise$Ev, 'async', async), _defineProperty(_asap$cast$Promise$Ev, 'filter', filter), _asap$cast$Promise$Ev);
exports['default'] = rsvp;
exports.asap = asap;
exports.cast = cast;
exports.Promise = Promise;
exports.EventTarget = EventTarget;
exports.all = all$1;
exports.allSettled = allSettled;
exports.race = race$1;
exports.hash = hash;
exports.hashSettled = hashSettled;
exports.rethrow = rethrow;
exports.defer = defer;
exports.denodeify = denodeify;
exports.configure = configure;
exports.on = on;
exports.off = off;
exports.resolve = resolve$2;
exports.reject = reject$2;
exports.map = map;
exports.async = async;
exports.filter = filter;
Object.defineProperty(exports, '__esModule', { value: true });
})));
var EPUBJS = EPUBJS || {};
EPUBJS.core = {};
var ELEMENT_NODE = 1;
var TEXT_NODE = 3;
var COMMENT_NODE = 8;
var DOCUMENT_NODE = 9;
//-- Get an element by its id
EPUBJS.core.getEl = function(elem) {
return document.getElementById(elem);
};
//-- Get all elements with the given class name
EPUBJS.core.getEls = function(classes) {
return document.getElementsByClassName(classes);
};
EPUBJS.core.request = function(url, type, withCredentials) {
var supportsURL = window.URL;
var BLOB_RESPONSE = supportsURL ? "blob" : "arraybuffer";
var deferred = RSVP.defer();
var xhr = new XMLHttpRequest();
var uri;
//-- Check from PDF.js:
// https://github.com/mozilla/pdf.js/blob/master/web/compatibility.js
var xhrPrototype = XMLHttpRequest.prototype;
var handler = function() {
var r;
if (this.readyState != this.DONE) return;
if ((this.status === 200 || this.status === 0) && this.response) { // Android & Firefox reporting 0 for local & blob urls
if (type == 'xml'){
// If this.responseXML wasn't set, try to parse using a DOMParser from text
if(!this.responseXML) {
r = new DOMParser().parseFromString(this.response, "application/xml");
} else {
r = this.responseXML;
}
} else if (type == 'xhtml') {
if (!this.responseXML){
r = new DOMParser().parseFromString(this.response, "application/xhtml+xml");
} else {
r = this.responseXML;
}
} else if (type == 'html') {
if (!this.responseXML){
r = new DOMParser().parseFromString(this.response, "text/html");
} else {
r = this.responseXML;
}
} else if (type == 'json') {
r = JSON.parse(this.response);
} else if (type == 'blob') {
if (supportsURL) {
r = this.response;
} else {
//-- Safari doesn't support responseType blob, so create a blob from arraybuffer
r = new Blob([this.response]);
}
} else {
r = this.response;
}
deferred.resolve(r);
} else {
deferred.reject({
message : this.response,
stack : new Error().stack
});
}
};
if (!('overrideMimeType' in xhrPrototype)) {
// IE10 might have response, but not overrideMimeType
Object.defineProperty(xhrPrototype, 'overrideMimeType', {
value: function xmlHttpRequestOverrideMimeType(mimeType) {}
});
}
xhr.onreadystatechange = handler;
xhr.open("GET", url, true);
if(withCredentials) {
xhr.withCredentials = true;
}
// If type isn't set, determine it from the file extension
if(!type) {
uri = EPUBJS.core.uri(url);
type = uri.extension;
type = {
'htm': 'html'
}[type] || type;
}
if(type == 'blob'){
xhr.responseType = BLOB_RESPONSE;
}
if(type == "json") {
xhr.setRequestHeader("Accept", "application/json");
}
if(type == 'xml') {
xhr.responseType = "document";
xhr.overrideMimeType('text/xml'); // for OPF parsing
}
if(type == 'xhtml') {
xhr.responseType = "document";
}
if(type == 'html') {
xhr.responseType = "document";
}
if(type == "binary") {
xhr.responseType = "arraybuffer";
}
xhr.send();
return deferred.promise;
};
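/*
	Illustrative usage (commented out so it is not executed here; the path is a
	hypothetical example). The returned value is an RSVP promise.

	EPUBJS.core.request("OPS/package.opf", "xml").then(function(doc) {
		console.log(doc.documentElement.nodeName); // the parsed XML document
	}, function(err) {
		console.error("request failed:", err.message);
	});
*/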
EPUBJS.core.toArray = function(obj) {
var arr = [];
for (var member in obj) {
var newitm;
if ( obj.hasOwnProperty(member) ) {
newitm = obj[member];
newitm.ident = member;
arr.push(newitm);
}
}
return arr;
};
//-- Parse the different parts of a url, returning a object
EPUBJS.core.uri = function(url){
var uri = {
protocol : '',
host : '',
path : '',
origin : '',
directory : '',
base : '',
filename : '',
extension : '',
		fragment : '',
		search : '',
href : url
},
blob = url.indexOf('blob:'),
doubleSlash = url.indexOf('://'),
search = url.indexOf('?'),
fragment = url.indexOf("#"),
withoutProtocol,
dot,
firstSlash;
	if(blob === 0) {
		uri.protocol = "blob";
		//-- base is the url without any fragment
		uri.base = fragment != -1 ? url.slice(0, fragment) : url;
		return uri;
	}
if(fragment != -1) {
uri.fragment = url.slice(fragment + 1);
url = url.slice(0, fragment);
}
	if(search != -1) {
		uri.search = url.slice(search + 1);
		url = url.slice(0, search);
	}
if(doubleSlash != -1) {
uri.protocol = url.slice(0, doubleSlash);
withoutProtocol = url.slice(doubleSlash+3);
firstSlash = withoutProtocol.indexOf('/');
		if(firstSlash === -1) {
			uri.host = withoutProtocol;
			uri.path = "";
		}
} else {
uri.host = withoutProtocol.slice(0, firstSlash);
uri.path = withoutProtocol.slice(firstSlash);
}
uri.origin = uri.protocol + "://" + uri.host;
uri.directory = EPUBJS.core.folder(uri.path);
uri.base = uri.origin + uri.directory;
// return origin;
} else {
uri.path = url;
uri.directory = EPUBJS.core.folder(url);
uri.base = uri.directory;
}
//-- Filename
uri.filename = url.replace(uri.base, '');
dot = uri.filename.lastIndexOf('.');
if(dot != -1) {
uri.extension = uri.filename.slice(dot+1);
}
return uri;
};
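/*
	Example of the parsed result (the URL is hypothetical; the values shown are
	what this parser produces for it):

	var uri = EPUBJS.core.uri("http://example.com/books/moby/chapter1.xhtml#pgepubid00001");
	// uri.protocol  === "http"
	// uri.host      === "example.com"
	// uri.path      === "/books/moby/chapter1.xhtml"
	// uri.origin    === "http://example.com"
	// uri.directory === "/books/moby/"
	// uri.base      === "http://example.com/books/moby/"
	// uri.filename  === "chapter1.xhtml"
	// uri.extension === "xhtml"
	// uri.fragment  === "pgepubid00001"
*/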
//-- Parse out the folder, will return everything before the last slash
EPUBJS.core.folder = function(url){
	var lastSlash = url.lastIndexOf('/');
	if(lastSlash == -1) return '';
	return url.slice(0, lastSlash + 1);
};
//-- https://github.com/ebidel/filer.js/blob/master/src/filer.js#L128
EPUBJS.core.dataURLToBlob = function(dataURL) {
var BASE64_MARKER = ';base64,',
parts, contentType, raw, rawLength, uInt8Array;
if (dataURL.indexOf(BASE64_MARKER) == -1) {
parts = dataURL.split(',');
contentType = parts[0].split(':')[1];
raw = parts[1];
return new Blob([raw], {type: contentType});
}
parts = dataURL.split(BASE64_MARKER);
contentType = parts[0].split(':')[1];
raw = window.atob(parts[1]);
rawLength = raw.length;
uInt8Array = new Uint8Array(rawLength);
for (var i = 0; i < rawLength; ++i) {
uInt8Array[i] = raw.charCodeAt(i);
}
return new Blob([uInt8Array], {type: contentType});
};
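/*
	Illustrative usage (not executed here): converting an inline data URL into
	a Blob. "aGVsbG8=" is base64 for "hello".

	var blob = EPUBJS.core.dataURLToBlob("data:text/plain;base64,aGVsbG8=");
	// blob.type === "text/plain"; blob contains the decoded bytes "hello"
*/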
//-- Load scripts async: http://stackoverflow.com/questions/7718935/load-scripts-asynchronously
EPUBJS.core.addScript = function(src, callback, target) {
var s, r;
r = false;
s = document.createElement('script');
s.type = 'text/javascript';
s.async = false;
s.src = src;
s.onload = s.onreadystatechange = function() {
if ( !r && (!this.readyState || this.readyState == 'complete') ) {
r = true;
if(callback) callback();
}
};
target = target || document.body;
target.appendChild(s);
};
EPUBJS.core.addScripts = function(srcArr, callback, target) {
var total = srcArr.length,
curr = 0,
cb = function(){
curr++;
if(total == curr){
if(callback) callback();
}else{
EPUBJS.core.addScript(srcArr[curr], cb, target);
}
};
EPUBJS.core.addScript(srcArr[curr], cb, target);
};
EPUBJS.core.addCss = function(src, callback, target) {
var s, r;
r = false;
s = document.createElement('link');
s.type = 'text/css';
s.rel = "stylesheet";
s.href = src;
s.onload = s.onreadystatechange = function() {
if ( !r && (!this.readyState || this.readyState == 'complete') ) {
r = true;
if(callback) callback();
}
};
target = target || document.body;
target.appendChild(s);
};
EPUBJS.core.prefixed = function(unprefixed) {
	var vendors = ["Webkit", "Moz", "O", "ms" ],
		upper = unprefixed[0].toUpperCase() + unprefixed.slice(1),
		length = vendors.length;
if (typeof(document.documentElement.style[unprefixed]) != 'undefined') {
return unprefixed;
}
for ( var i=0; i < length; i++ ) {
if (typeof(document.documentElement.style[vendors[i] + upper]) != 'undefined') {
return vendors[i] + upper;
}
}
return unprefixed;
};
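/*
	Example (the result depends on the running browser):

	var transformProp = EPUBJS.core.prefixed("transform");
	// "transform" where supported unprefixed, or e.g. "WebkitTransform"
	// on engines that only expose the vendor-prefixed property
*/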
EPUBJS.core.resolveUrl = function(base, path) {
var url,
segments = [],
uri = EPUBJS.core.uri(path),
folders = base.split("/"),
paths;
if(uri.host) {
return path;
}
folders.pop();
paths = path.split("/");
paths.forEach(function(p){
if(p === ".."){
folders.pop();
}else{
segments.push(p);
}
});
url = folders.concat(segments);
return url.join("/");
};
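/*
	Example: resolving a relative manifest href against a base directory
	(the paths are hypothetical):

	EPUBJS.core.resolveUrl("OPS/chapters/", "../images/cover.jpg");
	// => "OPS/images/cover.jpg"
*/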
// http://stackoverflow.com/questions/105034/how-to-create-a-guid-uuid-in-javascript
EPUBJS.core.uuid = function() {
var d = new Date().getTime();
var uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
var r = (d + Math.random()*16)%16 | 0;
d = Math.floor(d/16);
return (c=='x' ? r : (r&0x7|0x8)).toString(16);
});
return uuid;
};
// Binary-search insert for a sorted array -- based on:
// http://stackoverflow.com/questions/1344500/efficient-way-to-insert-a-number-into-a-sorted-array-of-numbers
EPUBJS.core.insert = function(item, array, compareFunction) {
var location = EPUBJS.core.locationOf(item, array, compareFunction);
array.splice(location, 0, item);
return location;
};
EPUBJS.core.locationOf = function(item, array, compareFunction, _start, _end) {
var start = _start || 0;
var end = _end || array.length;
var pivot = parseInt(start + (end - start) / 2);
var compared;
	if(!compareFunction){
		compareFunction = function(a, b) {
			if(a > b) return 1;
			if(a < b) return -1;
			if(a === b) return 0;
		};
	}
}
if(end-start <= 0) {
return pivot;
}
compared = compareFunction(array[pivot], item);
if(end-start === 1) {
return compared > 0 ? pivot : pivot + 1;
}
if(compared === 0) {
return pivot;
}
if(compared === -1) {
return EPUBJS.core.locationOf(item, array, compareFunction, pivot, end);
} else{
return EPUBJS.core.locationOf(item, array, compareFunction, start, pivot);
}
};
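/*
	Illustrative usage (not executed here): `insert` keeps an array sorted by
	splicing the item in at the index found by the binary search above.

	var sorted = [1, 3, 5, 7];
	EPUBJS.core.insert(4, sorted); // returns 2
	// sorted is now [1, 3, 4, 5, 7]
*/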
EPUBJS.core.indexOfSorted = function(item, array, compareFunction, _start, _end) {
var start = _start || 0;
var end = _end || array.length;
var pivot = parseInt(start + (end - start) / 2);
var compared;
	if(!compareFunction){
		compareFunction = function(a, b) {
			if(a > b) return 1;
			if(a < b) return -1;
			if(a === b) return 0;
		};
	}
}
if(end-start <= 0) {
return -1; // Not found
}
compared = compareFunction(array[pivot], item);
if(end-start === 1) {
return compared === 0 ? pivot : -1;
}
if(compared === 0) {
return pivot; // Found
}
if(compared === -1) {
return EPUBJS.core.indexOfSorted(item, array, compareFunction, pivot, end);
} else{
return EPUBJS.core.indexOfSorted(item, array, compareFunction, start, pivot);
}
};
EPUBJS.core.queue = function(_scope){
var _q = [];
var scope = _scope;
// Add an item to the queue
var enqueue = function(funcName, args, context) {
_q.push({
"funcName" : funcName,
"args" : args,
"context" : context
});
return _q;
};
// Run one item
var dequeue = function(){
var inwait;
if(_q.length) {
inwait = _q.shift();
// Defer to any current tasks
// setTimeout(function(){
scope[inwait.funcName].apply(inwait.context || scope, inwait.args);
// }, 0);
}
};
// Run All
var flush = function(){
while(_q.length) {
dequeue();
}
};
// Clear all items in wait
var clear = function(){
_q = [];
};
var length = function(){
return _q.length;
};
return {
"enqueue" : enqueue,
"dequeue" : dequeue,
"flush" : flush,
"clear" : clear,
"length" : length
};
};
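/*
	Illustrative usage (not executed here): deferring method calls on a scope
	object until `flush` is called. `scope` below is a made-up example object.

	var scope = { greet: function (name) { console.log("hi " + name); } };
	var q = EPUBJS.core.queue(scope);
	q.enqueue("greet", ["alice"]);
	q.enqueue("greet", ["bob"]);
	q.flush(); // runs both queued calls, in order
*/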
// From: https://code.google.com/p/fbug/source/browse/branches/firebug1.10/content/firebug/lib/xpath.js
/**
* Gets an XPath for an element which describes its hierarchical location.
*/
EPUBJS.core.getElementXPath = function(element) {
if (element && element.id) {
return '//*[@id="' + element.id + '"]';
} else {
return EPUBJS.core.getElementTreeXPath(element);
}
};
EPUBJS.core.getElementTreeXPath = function(element) {
var paths = [];
var isXhtml = (element.ownerDocument.documentElement.getAttribute('xmlns') === "http://www.w3.org/1999/xhtml");
var index, nodeName, tagName, pathIndex;
if(element.nodeType === Node.TEXT_NODE){
// index = Array.prototype.indexOf.call(element.parentNode.childNodes, element) + 1;
index = EPUBJS.core.indexOfTextNode(element) + 1;
paths.push("text()["+index+"]");
element = element.parentNode;
}
// Use nodeName (instead of localName) so namespace prefix is included (if any).
for (; element && element.nodeType == 1; element = element.parentNode)
{
index = 0;
for (var sibling = element.previousSibling; sibling; sibling = sibling.previousSibling)
{
// Ignore document type declaration.
if (sibling.nodeType == Node.DOCUMENT_TYPE_NODE) {
continue;
}
if (sibling.nodeName == element.nodeName) {
++index;
}
}
nodeName = element.nodeName.toLowerCase();
tagName = (isXhtml ? "xhtml:" + nodeName : nodeName);
pathIndex = (index ? "[" + (index+1) + "]" : "");
paths.splice(0, 0, tagName + pathIndex);
}
return paths.length ? "./" + paths.join("/") : null;
};
EPUBJS.core.nsResolver = function(prefix) {
var ns = {
'xhtml' : 'http://www.w3.org/1999/xhtml',
'epub': 'http://www.idpf.org/2007/ops'
};
return ns[prefix] || null;
};
//https://stackoverflow.com/questions/13482352/xquery-looking-for-text-with-single-quote/13483496#13483496
EPUBJS.core.cleanStringForXpath = function(str) {
var parts = str.match(/[^'"]+|['"]/g);
parts = parts.map(function(part){
if (part === "'") {
return '\"\'\"'; // output "'"
}
if (part === '"') {
return "\'\"\'"; // output '"'
}
return "\'" + part + "\'";
});
return "concat(\'\'," + parts.join(",") + ")";
};
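/*
	Example: quoting a string that mixes both quote characters for use inside
	an XPath expression (the concat() form is what the function returns):

	EPUBJS.core.cleanStringForXpath('it\'s a "test"');
	// => concat('','it',"'",'s a ','"','test','"')
*/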
// Note: superseded by the indexOfNode-based implementation later in this file.
EPUBJS.core.indexOfTextNode = function(textNode){
var parent = textNode.parentNode;
var children = parent.childNodes;
var sib;
var index = -1;
for (var i = 0; i < children.length; i++) {
sib = children[i];
if(sib.nodeType === Node.TEXT_NODE){
index++;
}
if(sib == textNode) break;
}
return index;
};
// Underscore
EPUBJS.core.defaults = function(obj) {
for (var i = 1, length = arguments.length; i < length; i++) {
var source = arguments[i];
for (var prop in source) {
if (obj[prop] === void 0) obj[prop] = source[prop];
}
}
return obj;
};
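/*
	Illustrative usage (not executed here): only missing keys are filled in.

	var opts = EPUBJS.core.defaults({ width: 300 }, { width: 100, height: 200 });
	// opts => { width: 300, height: 200 }
*/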
EPUBJS.core.extend = function(target) {
var sources = [].slice.call(arguments, 1);
sources.forEach(function (source) {
if(!source) return;
Object.getOwnPropertyNames(source).forEach(function(propName) {
Object.defineProperty(target, propName, Object.getOwnPropertyDescriptor(source, propName));
});
});
return target;
};
EPUBJS.core.clone = function(obj) {
return EPUBJS.core.isArray(obj) ? obj.slice() : EPUBJS.core.extend({}, obj);
};
EPUBJS.core.isElement = function(obj) {
return !!(obj && obj.nodeType == 1);
};
EPUBJS.core.isNumber = function(n) {
return !isNaN(parseFloat(n)) && isFinite(n);
};
EPUBJS.core.isString = function(str) {
return (typeof str === 'string' || str instanceof String);
};
EPUBJS.core.isArray = Array.isArray || function(obj) {
return Object.prototype.toString.call(obj) === '[object Array]';
};
// Lodash
EPUBJS.core.values = function(object) {
var index = -1;
var props, length, result;
if(!object) return [];
props = Object.keys(object);
length = props.length;
result = Array(length);
while (++index < length) {
result[index] = object[props[index]];
}
return result;
};
EPUBJS.core.indexOfNode = function(node, typeId) {
var parent = node.parentNode;
var children = parent.childNodes;
var sib;
var index = -1;
for (var i = 0; i < children.length; i++) {
sib = children[i];
if (sib.nodeType === typeId) {
index++;
}
if (sib == node) break;
}
return index;
}
EPUBJS.core.indexOfTextNode = function(textNode) {
return EPUBJS.core.indexOfNode(textNode, TEXT_NODE);
}
EPUBJS.core.indexOfElementNode = function(elementNode) {
return EPUBJS.core.indexOfNode(elementNode, ELEMENT_NODE);
}
var EPUBJS = EPUBJS || {};
EPUBJS.reader = {};
EPUBJS.reader.plugins = {}; //-- Attach extra Controllers as plugins (like search?)
(function(root, $) {
var previousReader = root.ePubReader || {};
var ePubReader = root.ePubReader = function(path, options) {
return new EPUBJS.Reader(path, options);
};
//exports to multiple environments
	if (typeof define === 'function' && define.amd) {
		//AMD
		define(function(){ return ePubReader; });
} else if (typeof module != "undefined" && module.exports) {
//Node
module.exports = ePubReader;
}
})(window, jQuery);
EPUBJS.Reader = function(bookPath, _options) {
var reader = this;
var book;
var plugin;
var $viewer = $("#viewer");
var search = window.location.search;
var parameters;
this.settings = EPUBJS.core.defaults(_options || {}, {
bookPath : bookPath,
restore : false,
reload : false,
bookmarks : undefined,
annotations : undefined,
contained : undefined,
bookKey : undefined,
styles : undefined,
sidebarReflow: false,
generatePagination: false,
history: true
});
// Override options with URL search parameters
if(search) {
parameters = search.slice(1).split("&");
parameters.forEach(function(p){
var split = p.split("=");
var name = split[0];
var value = split[1] || '';
reader.settings[name] = decodeURIComponent(value);
});
}
this.setBookKey(this.settings.bookPath); //-- This could be username + path or any unique string
if(this.settings.restore && this.isSaved()) {
this.applySavedSettings();
}
this.settings.styles = this.settings.styles || {
fontSize : "100%"
};
this.book = book = new ePub(this.settings.bookPath, this.settings);
this.offline = false;
this.sidebarOpen = false;
if(!this.settings.bookmarks) {
this.settings.bookmarks = [];
}
if(!this.settings.annotations) {
this.settings.annotations = [];
}
if(this.settings.generatePagination) {
book.generatePagination($viewer.width(), $viewer.height());
}
this.rendition = book.renderTo("viewer", {
ignoreClass: "annotator-hl",
width: "100%",
height: "100%"
});
if(this.settings.previousLocationCfi) {
this.displayed = this.rendition.display(this.settings.previousLocationCfi);
} else {
this.displayed = this.rendition.display();
}
book.ready.then(function () {
reader.ReaderController = EPUBJS.reader.ReaderController.call(reader, book);
reader.SettingsController = EPUBJS.reader.SettingsController.call(reader, book);
reader.ControlsController = EPUBJS.reader.ControlsController.call(reader, book);
reader.SidebarController = EPUBJS.reader.SidebarController.call(reader, book);
reader.BookmarksController = EPUBJS.reader.BookmarksController.call(reader, book);
reader.NotesController = EPUBJS.reader.NotesController.call(reader, book);
window.addEventListener("hashchange", this.hashChanged.bind(this), false);
document.addEventListener('keydown', this.adjustFontSize.bind(this), false);
this.rendition.on("keydown", this.adjustFontSize.bind(this));
this.rendition.on("keydown", reader.ReaderController.arrowKeys.bind(this));
this.rendition.on("selected", this.selectedRange.bind(this));
}.bind(this)).then(function() {
reader.ReaderController.hideLoader();
}.bind(this));
// Call Plugins
for(plugin in EPUBJS.reader.plugins) {
if(EPUBJS.reader.plugins.hasOwnProperty(plugin)) {
reader[plugin] = EPUBJS.reader.plugins[plugin].call(reader, book);
}
}
book.loaded.metadata.then(function(meta) {
reader.MetaController = EPUBJS.reader.MetaController.call(reader, meta);
});
book.loaded.navigation.then(function(navigation) {
reader.TocController = EPUBJS.reader.TocController.call(reader, navigation);
});
window.addEventListener("beforeunload", this.unload.bind(this), false);
return this;
};
EPUBJS.Reader.prototype.adjustFontSize = function(e) {
var fontSize;
var interval = 2;
var PLUS = 187;
var MINUS = 189;
var ZERO = 48;
var MOD = (e.ctrlKey || e.metaKey );
if(!this.settings.styles) return;
if(!this.settings.styles.fontSize) {
this.settings.styles.fontSize = "100%";
}
fontSize = parseInt(this.settings.styles.fontSize.slice(0, -1));
if(MOD && e.keyCode == PLUS) {
e.preventDefault();
this.book.setStyle("fontSize", (fontSize + interval) + "%");
}
if(MOD && e.keyCode == MINUS){
e.preventDefault();
this.book.setStyle("fontSize", (fontSize - interval) + "%");
}
if(MOD && e.keyCode == ZERO){
e.preventDefault();
this.book.setStyle("fontSize", "100%");
}
};
EPUBJS.Reader.prototype.addBookmark = function(cfi) {
var present = this.isBookmarked(cfi);
if(present > -1 ) return;
this.settings.bookmarks.push(cfi);
this.trigger("reader:bookmarked", cfi);
};
EPUBJS.Reader.prototype.removeBookmark = function(cfi) {
var bookmark = this.isBookmarked(cfi);
if( bookmark === -1 ) return;
this.settings.bookmarks.splice(bookmark, 1);
this.trigger("reader:unbookmarked", bookmark);
};
EPUBJS.Reader.prototype.isBookmarked = function(cfi) {
var bookmarks = this.settings.bookmarks;
return bookmarks.indexOf(cfi);
};
/*
EPUBJS.Reader.prototype.searchBookmarked = function(cfi) {
var bookmarks = this.settings.bookmarks,
len = bookmarks.length,
i;
for(i = 0; i < len; i++) {
if (bookmarks[i]['cfi'] === cfi) return i;
}
return -1;
};
*/
EPUBJS.Reader.prototype.clearBookmarks = function() {
this.settings.bookmarks = [];
};
//-- Notes
EPUBJS.Reader.prototype.addNote = function(note) {
this.settings.annotations.push(note);
};
EPUBJS.Reader.prototype.removeNote = function(note) {
var index = this.settings.annotations.indexOf(note);
if( index === -1 ) return;
delete this.settings.annotations[index];
};
EPUBJS.Reader.prototype.clearNotes = function() {
this.settings.annotations = [];
};
//-- Settings
EPUBJS.Reader.prototype.setBookKey = function(identifier){
if(!this.settings.bookKey) {
this.settings.bookKey = "epubjsreader:" + EPUBJS.VERSION + ":" + window.location.host + ":" + identifier;
}
return this.settings.bookKey;
};
//-- Checks if the book setting can be retrieved from localStorage
EPUBJS.Reader.prototype.isSaved = function(bookPath) {
var storedSettings;
if(!localStorage) {
return false;
}
storedSettings = localStorage.getItem(this.settings.bookKey);
if(storedSettings === null) {
return false;
} else {
return true;
}
};
EPUBJS.Reader.prototype.removeSavedSettings = function() {
if(!localStorage) {
return false;
}
localStorage.removeItem(this.settings.bookKey);
};
EPUBJS.Reader.prototype.applySavedSettings = function() {
var stored;
if(!localStorage) {
return false;
}
try {
stored = JSON.parse(localStorage.getItem(this.settings.bookKey));
} catch (e) { // parsing error of localStorage
return false;
}
if(stored) {
// Merge styles
if(stored.styles) {
this.settings.styles = EPUBJS.core.defaults(this.settings.styles || {}, stored.styles);
}
// Merge the rest
this.settings = EPUBJS.core.defaults(this.settings, stored);
return true;
} else {
return false;
}
};
EPUBJS.Reader.prototype.saveSettings = function(){
if(this.book) {
this.settings.previousLocationCfi = this.rendition.currentLocation().start.cfi;
}
if(!localStorage) {
return false;
}
localStorage.setItem(this.settings.bookKey, JSON.stringify(this.settings));
};
EPUBJS.Reader.prototype.unload = function(){
if(this.settings.restore && localStorage) {
this.saveSettings();
}
};
EPUBJS.Reader.prototype.hashChanged = function(){
var hash = window.location.hash.slice(1);
this.rendition.display(hash);
};
EPUBJS.Reader.prototype.selectedRange = function(cfiRange){
var cfiFragment = "#"+cfiRange;
// Update the History Location
if(this.settings.history &&
window.location.hash != cfiFragment) {
// Add CFI fragment to the history
history.pushState({}, '', cfiFragment);
this.currentLocationCfi = cfiRange;
}
};
//-- Enable binding events to reader
RSVP.EventTarget.mixin(EPUBJS.Reader.prototype);
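/*
	Illustrative usage (not executed here): thanks to the EventTarget mixin,
	reader instances can be observed with on/off, e.g. for the bookmark events
	triggered above.

	reader.on("reader:bookmarked", function(cfi) {
		console.log("bookmarked at", cfi);
	});
*/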
EPUBJS.reader.BookmarksController = function() {
var reader = this;
var book = this.book;
var rendition = this.rendition;
var $bookmarks = $("#bookmarksView"),
$list = $bookmarks.find("#bookmarks");
var docfrag = document.createDocumentFragment();
var show = function() {
$bookmarks.show();
};
var hide = function() {
$bookmarks.hide();
};
var counter = 0;
var createBookmarkItem = function(cfi) {
var listitem = document.createElement("li"),
link = document.createElement("a");
listitem.id = "bookmark-"+counter;
listitem.classList.add('list_item');
var spineItem = book.spine.get(cfi);
var tocItem;
if (spineItem.index in book.navigation.toc) {
tocItem = book.navigation.toc[spineItem.index];
link.textContent = tocItem.label;
} else {
link.textContent = cfi;
}
link.href = cfi;
link.classList.add('bookmark_link');
link.addEventListener("click", function(event){
var cfi = this.getAttribute('href');
rendition.display(cfi);
event.preventDefault();
}, false);
listitem.appendChild(link);
counter++;
return listitem;
};
this.settings.bookmarks.forEach(function(cfi) {
var bookmark = createBookmarkItem(cfi);
docfrag.appendChild(bookmark);
});
$list.append(docfrag);
this.on("reader:bookmarked", function(cfi) {
var item = createBookmarkItem(cfi);
$list.append(item);
});
this.on("reader:unbookmarked", function(index) {
var $item = $("#bookmark-"+index);
$item.remove();
});
return {
"show" : show,
"hide" : hide
};
};
EPUBJS.reader.ControlsController = function(book) {
var reader = this;
var rendition = this.rendition;
var $store = $("#store"),
$fullscreen = $("#fullscreen"),
$fullscreenicon = $("#fullscreenicon"),
$cancelfullscreenicon = $("#cancelfullscreenicon"),
$slider = $("#slider"),
$main = $("#main"),
$sidebar = $("#sidebar"),
$settings = $("#setting"),
$bookmark = $("#bookmark");
/*
var goOnline = function() {
reader.offline = false;
// $store.attr("src", $icon.data("save"));
};
var goOffline = function() {
reader.offline = true;
// $store.attr("src", $icon.data("saved"));
};
var fullscreen = false;
book.on("book:online", goOnline);
book.on("book:offline", goOffline);
	*/
	var fullscreen = false;
$slider.on("click", function () {
if(reader.sidebarOpen) {
reader.SidebarController.hide();
$slider.addClass("icon-menu");
$slider.removeClass("icon-right");
} else {
reader.SidebarController.show();
$slider.addClass("icon-right");
$slider.removeClass("icon-menu");
}
});
if(typeof screenfull !== 'undefined') {
$fullscreen.on("click", function() {
screenfull.toggle($('#container')[0]);
});
if(screenfull.raw) {
document.addEventListener(screenfull.raw.fullscreenchange, function() {
fullscreen = screenfull.isFullscreen;
if(fullscreen) {
$fullscreen
.addClass("icon-resize-small")
.removeClass("icon-resize-full");
} else {
$fullscreen
.addClass("icon-resize-full")
.removeClass("icon-resize-small");
}
});
}
}
$settings.on("click", function() {
reader.SettingsController.show();
});
$bookmark.on("click", function() {
var cfi = reader.rendition.currentLocation().start.cfi;
var bookmarked = reader.isBookmarked(cfi);
if(bookmarked === -1) { //-- Add bookmark
reader.addBookmark(cfi);
$bookmark
.addClass("icon-bookmark")
.removeClass("icon-bookmark-empty");
} else { //-- Remove Bookmark
reader.removeBookmark(cfi);
$bookmark
.removeClass("icon-bookmark")
.addClass("icon-bookmark-empty");
}
});
rendition.on('relocated', function(location){
var cfi = location.start.cfi;
var cfiFragment = "#" + cfi;
//-- Check if bookmarked
var bookmarked = reader.isBookmarked(cfi);
if(bookmarked === -1) { //-- Not bookmarked
$bookmark
.removeClass("icon-bookmark")
.addClass("icon-bookmark-empty");
} else { //-- Bookmarked
$bookmark
.addClass("icon-bookmark")
.removeClass("icon-bookmark-empty");
}
reader.currentLocationCfi = cfi;
// Update the History Location
if(reader.settings.history &&
window.location.hash != cfiFragment) {
// Add CFI fragment to the history
history.pushState({}, '', cfiFragment);
}
});
return {
};
};
EPUBJS.reader.MetaController = function(meta) {
var title = meta.title,
author = meta.creator;
var $title = $("#book-title"),
$author = $("#chapter-title"),
$dash = $("#title-seperator");
document.title = title+" – "+author;
$title.html(title);
$author.html(author);
$dash.show();
};
EPUBJS.reader.NotesController = function() {
var book = this.book;
var rendition = this.rendition;
var reader = this;
var $notesView = $("#notesView");
var $notes = $("#notes");
var $text = $("#note-text");
var $anchor = $("#note-anchor");
var annotations = reader.settings.annotations;
var renderer = book.renderer;
var popups = [];
var epubcfi = new ePub.CFI();
var show = function() {
$notesView.show();
};
var hide = function() {
$notesView.hide();
}
var insertAtPoint = function(e) {
var range;
var textNode;
var offset;
var doc = book.renderer.doc;
var cfi;
var annotation;
// standard
if (doc.caretPositionFromPoint) {
range = doc.caretPositionFromPoint(e.clientX, e.clientY);
textNode = range.offsetNode;
offset = range.offset;
// WebKit
} else if (doc.caretRangeFromPoint) {
range = doc.caretRangeFromPoint(e.clientX, e.clientY);
textNode = range.startContainer;
offset = range.startOffset;
}
if (textNode.nodeType !== 3) {
for (var i=0; i < textNode.childNodes.length; i++) {
if (textNode.childNodes[i].nodeType == 3) {
textNode = textNode.childNodes[i];
break;
}
}
}
// Find the end of the sentence
offset = textNode.textContent.indexOf(".", offset);
if(offset === -1){
offset = textNode.length; // Last item
} else {
offset += 1; // After the period
}
cfi = epubcfi.generateCfiFromTextNode(textNode, offset, book.renderer.currentChapter.cfiBase);
annotation = {
annotatedAt: new Date(),
anchor: cfi,
body: $text.val()
}
// add to list
reader.addNote(annotation);
// attach
addAnnotation(annotation);
placeMarker(annotation);
// clear
$text.val('');
$anchor.text("Attach");
$text.prop("disabled", false);
rendition.off("click", insertAtPoint);
};
var addAnnotation = function(annotation){
var note = document.createElement("li");
var link = document.createElement("a");
note.innerHTML = annotation.body;
// note.setAttribute("ref", annotation.anchor);
link.innerHTML = " context »";
link.href = "#"+annotation.anchor;
link.onclick = function(){
rendition.display(annotation.anchor);
return false;
};
note.appendChild(link);
$notes.append(note);
};
var placeMarker = function(annotation){
var doc = book.renderer.doc;
var marker = document.createElement("span");
var mark = document.createElement("a");
marker.classList.add("footnotesuperscript", "reader_generated");
marker.style.verticalAlign = "super";
marker.style.fontSize = ".75em";
// marker.style.position = "relative";
marker.style.lineHeight = "1em";
// mark.style.display = "inline-block";
mark.style.padding = "2px";
mark.style.backgroundColor = "#fffa96";
mark.style.borderRadius = "5px";
mark.style.cursor = "pointer";
marker.id = "note-"+EPUBJS.core.uuid();
mark.innerHTML = annotations.indexOf(annotation) + 1 + "[Reader]";
marker.appendChild(mark);
epubcfi.addMarker(annotation.anchor, doc, marker);
markerEvents(marker, annotation.body);
}
var markerEvents = function(item, txt){
var id = item.id;
var showPop = function(){
		var poppos,
			iheight = renderer.height,
			iwidth = renderer.width,
			tip,
			pop,
			pop_content,
			popRect,
			maxHeight = 225,
			itemRect,
			left,
			top,
			pos;
//-- create a popup with endnote inside of it
if(!popups[id]) {
popups[id] = document.createElement("div");
popups[id].setAttribute("class", "popup");
pop_content = document.createElement("div");
popups[id].appendChild(pop_content);
pop_content.innerHTML = txt;
pop_content.setAttribute("class", "pop_content");
renderer.render.document.body.appendChild(popups[id]);
//-- TODO: will these leak memory? - Fred
popups[id].addEventListener("mouseover", onPop, false);
popups[id].addEventListener("mouseout", offPop, false);
//-- Add hide on page change
rendition.on("locationChanged", hidePop, this);
rendition.on("locationChanged", offPop, this);
// chapter.book.on("renderer:chapterDestroy", hidePop, this);
}
		pop = popups[id];
		pop_content = pop.firstChild;
//-- get location of item
itemRect = item.getBoundingClientRect();
left = itemRect.left;
top = itemRect.top;
//-- show the popup
pop.classList.add("show");
//-- locations of popup
popRect = pop.getBoundingClientRect();
//-- position the popup
pop.style.left = left - popRect.width / 2 + "px";
pop.style.top = top + "px";
//-- Adjust max height
if(maxHeight > iheight / 2.5) {
maxHeight = iheight / 2.5;
pop_content.style.maxHeight = maxHeight + "px";
}
//-- switch above / below
if(popRect.height + top >= iheight - 25) {
pop.style.top = top - popRect.height + "px";
pop.classList.add("above");
}else{
pop.classList.remove("above");
}
//-- switch left
if(left - popRect.width <= 0) {
pop.style.left = left + "px";
pop.classList.add("left");
}else{
pop.classList.remove("left");
}
//-- switch right
if(left + popRect.width / 2 >= iwidth) {
//-- TEMP MOVE: 300
pop.style.left = left - 300 + "px";
popRect = pop.getBoundingClientRect();
pop.style.left = left - popRect.width + "px";
//-- switch above / below again
if(popRect.height + top >= iheight - 25) {
pop.style.top = top - popRect.height + "px";
pop.classList.add("above");
}else{
pop.classList.remove("above");
}
pop.classList.add("right");
}else{
pop.classList.remove("right");
}
}
var onPop = function(){
popups[id].classList.add("on");
}
var offPop = function(){
popups[id].classList.remove("on");
}
var hidePop = function(){
setTimeout(function(){
popups[id].classList.remove("show");
}, 100);
}
var openSidebar = function(){
reader.ReaderController.slideOut();
show();
};
item.addEventListener("mouseover", showPop, false);
item.addEventListener("mouseout", hidePop, false);
item.addEventListener("click", openSidebar, false);
}
$anchor.on("click", function(e){
$anchor.text("Cancel");
	$text.prop("disabled", true);
// listen for selection
rendition.on("click", insertAtPoint);
});
annotations.forEach(function(note) {
addAnnotation(note);
});
/*
renderer.registerHook("beforeChapterDisplay", function(callback, renderer){
var chapter = renderer.currentChapter;
annotations.forEach(function(note) {
var cfi = epubcfi.parse(note.anchor);
if(cfi.spinePos === chapter.spinePos) {
try {
placeMarker(note);
} catch(e) {
console.log("anchoring failed", note.anchor);
}
}
});
callback();
}, true);
*/
return {
"show" : show,
"hide" : hide
};
};
EPUBJS.reader.ReaderController = function(book) {
var $main = $("#main"),
$divider = $("#divider"),
$loader = $("#loader"),
$next = $("#next"),
$prev = $("#prev");
var reader = this;
var book = this.book;
var rendition = this.rendition;
var slideIn = function() {
var currentPosition = rendition.currentLocation().start.cfi;
if (reader.settings.sidebarReflow){
$main.removeClass('single');
$main.one("transitionend", function(){
rendition.resize();
});
} else {
$main.removeClass("closed");
}
};
var slideOut = function() {
var location = rendition.currentLocation();
if (!location) {
return;
}
var currentPosition = location.start.cfi;
if (reader.settings.sidebarReflow){
$main.addClass('single');
$main.one("transitionend", function(){
rendition.resize();
});
} else {
$main.addClass("closed");
}
};
var showLoader = function() {
$loader.show();
hideDivider();
};
var hideLoader = function() {
$loader.hide();
//-- If the book is using spreads, show the divider
// if(book.settings.spreads) {
// showDivider();
// }
};
var showDivider = function() {
$divider.addClass("show");
};
var hideDivider = function() {
$divider.removeClass("show");
};
var keylock = false;
var arrowKeys = function(e) {
if(e.keyCode == 37) {
if(book.package.metadata.direction === "rtl") {
rendition.next();
} else {
rendition.prev();
}
$prev.addClass("active");
keylock = true;
setTimeout(function(){
keylock = false;
$prev.removeClass("active");
}, 100);
e.preventDefault();
}
if(e.keyCode == 39) {
if(book.package.metadata.direction === "rtl") {
rendition.prev();
} else {
rendition.next();
}
$next.addClass("active");
keylock = true;
setTimeout(function(){
keylock = false;
$next.removeClass("active");
}, 100);
e.preventDefault();
}
}
document.addEventListener('keydown', arrowKeys, false);
$next.on("click", function(e){
if(book.package.metadata.direction === "rtl") {
rendition.prev();
} else {
rendition.next();
}
e.preventDefault();
});
$prev.on("click", function(e){
if(book.package.metadata.direction === "rtl") {
rendition.next();
} else {
rendition.prev();
}
e.preventDefault();
});
rendition.on("layout", function(props){
if(props.spread === true) {
showDivider();
} else {
hideDivider();
}
});
rendition.on('relocated', function(location){
if (location.atStart) {
$prev.addClass("disabled");
}
if (location.atEnd) {
$next.addClass("disabled");
}
});
return {
"slideOut" : slideOut,
"slideIn" : slideIn,
"showLoader" : showLoader,
"hideLoader" : hideLoader,
"showDivider" : showDivider,
"hideDivider" : hideDivider,
"arrowKeys" : arrowKeys
};
};
EPUBJS.reader.SettingsController = function() {
var book = this.book;
var reader = this;
var $settings = $("#settings-modal"),
$overlay = $(".overlay");
var show = function() {
$settings.addClass("md-show");
};
var hide = function() {
$settings.removeClass("md-show");
};
var $sidebarReflowSetting = $('#sidebarReflow');
$sidebarReflowSetting.on('click', function() {
reader.settings.sidebarReflow = !reader.settings.sidebarReflow;
});
$settings.find(".closer").on("click", function() {
hide();
});
$overlay.on("click", function() {
hide();
});
return {
"show" : show,
"hide" : hide
};
};
EPUBJS.reader.SidebarController = function(book) {
var reader = this;
var $sidebar = $("#sidebar"),
$panels = $("#panels");
var activePanel = "Toc";
var changePanelTo = function(viewName) {
var controllerName = viewName + "Controller";
if(activePanel == viewName || typeof reader[controllerName] === 'undefined' ) return;
reader[activePanel+ "Controller"].hide();
reader[controllerName].show();
activePanel = viewName;
$panels.find('.active').removeClass("active");
$panels.find("#show-" + viewName ).addClass("active");
};
var getActivePanel = function() {
return activePanel;
};
var show = function() {
reader.sidebarOpen = true;
reader.ReaderController.slideOut();
$sidebar.addClass("open");
}
var hide = function() {
reader.sidebarOpen = false;
reader.ReaderController.slideIn();
$sidebar.removeClass("open");
}
$panels.find(".show_view").on("click", function(event) {
var view = $(this).data("view");
changePanelTo(view);
event.preventDefault();
});
return {
'show' : show,
'hide' : hide,
'getActivePanel' : getActivePanel,
'changePanelTo' : changePanelTo
};
};
EPUBJS.reader.TocController = function(toc) {
var book = this.book;
var rendition = this.rendition;
var $list = $("#tocView"),
docfrag = document.createDocumentFragment();
var currentChapter = false;
var generateTocItems = function(toc, level) {
var container = document.createElement("ul");
if(!level) level = 1;
toc.forEach(function(chapter) {
		var listitem = document.createElement("li"),
			link = document.createElement("a"),
			toggle = document.createElement("a");
var subitems;
listitem.id = "toc-"+chapter.id;
listitem.classList.add('list_item');
link.textContent = chapter.label;
link.href = chapter.href;
link.classList.add('toc_link');
listitem.appendChild(link);
if(chapter.subitems && chapter.subitems.length > 0) {
level++;
subitems = generateTocItems(chapter.subitems, level);
toggle.classList.add('toc_toggle');
listitem.insertBefore(toggle, link);
listitem.appendChild(subitems);
}
container.appendChild(listitem);
});
return container;
};
var onShow = function() {
$list.show();
};
var onHide = function() {
$list.hide();
};
var chapterChange = function(e) {
var id = e.id,
$item = $list.find("#toc-"+id),
$current = $list.find(".currentChapter"),
$open = $list.find('.openChapter');
if($item.length){
if($item != $current && $item.has(currentChapter).length > 0) {
$current.removeClass("currentChapter");
}
$item.addClass("currentChapter");
// $open.removeClass("openChapter");
$item.parents('li').addClass("openChapter");
}
};
rendition.on('rendered', chapterChange);
var tocitems = generateTocItems(toc);
docfrag.appendChild(tocitems);
$list.append(docfrag);
$list.find(".toc_link").on("click", function(event){
var url = this.getAttribute('href');
event.preventDefault();
//-- Provide the Book with the url to show
// The Url must be found in the books manifest
rendition.display(url);
$list.find(".currentChapter")
.addClass("openChapter")
.removeClass("currentChapter");
$(this).parent('li').addClass("currentChapter");
});
$list.find(".toc_toggle").on("click", function(event){
var $el = $(this).parent('li'),
open = $el.hasClass("openChapter");
event.preventDefault();
if(open){
$el.removeClass("openChapter");
} else {
$el.addClass("openChapter");
}
});
return {
"show" : onShow,
"hide" : onHide
};
};
//# sourceMappingURL=reader.js.map | zhiyao-huihuxi-jiuneng-zuomingxiang | /zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1.tar.gz/zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1/ZhiyaoHuihuxiJiunengZuomingxiang/js/reader.js | reader.js |
window.hypothesisConfig = function() {
var Annotator = window.Annotator;
var $main = $("#main");
function EpubAnnotationSidebar(elem, options) {
		// Note: any options passed in are ignored; the sidebar always uses these fixed settings.
		options = {
			server: true,
			origin: true,
			showHighlights: true,
			Toolbar: {container: '#annotation-controls'}
		};
Annotator.Host.call(this, elem, options);
}
EpubAnnotationSidebar.prototype = Object.create(Annotator.Host.prototype);
EpubAnnotationSidebar.prototype.show = function() {
this.frame.css({
'margin-left': (-1 * this.frame.width()) + "px"
});
this.frame.removeClass('annotator-collapsed');
if (!$main.hasClass('single')) {
$main.addClass("single");
this.toolbar.find('[name=sidebar-toggle]').removeClass('h-icon-chevron-left').addClass('h-icon-chevron-right');
this.setVisibleHighlights(true);
}
};
EpubAnnotationSidebar.prototype.hide = function() {
this.frame.css({
'margin-left': ''
});
this.frame.addClass('annotator-collapsed');
if ($main.hasClass('single')) {
$main.removeClass("single");
this.toolbar.find('[name=sidebar-toggle]').removeClass('h-icon-chevron-right').addClass('h-icon-chevron-left');
this.setVisibleHighlights(false);
}
};
return {
constructor: EpubAnnotationSidebar,
}
};
// This is the Epub.js plugin. Annotations are updated on location change.
EPUBJS.reader.plugins.HypothesisController = function (Book) {
var reader = this;
var $main = $("#main");
var updateAnnotations = function () {
var annotator = Book.renderer.render.window.annotator;
if (annotator && annotator.constructor.$) {
var annotations = getVisibleAnnotations(annotator.constructor.$);
annotator.showAnnotations(annotations)
}
};
var getVisibleAnnotations = function ($) {
var width = Book.renderer.render.iframe.clientWidth;
return $('.annotator-hl').map(function() {
var $this = $(this),
left = this.getBoundingClientRect().left;
if (left >= 0 && left <= width) {
return $this.data('annotation');
}
}).get();
};
Book.on("renderer:locationChanged", updateAnnotations);
return {}
}; | zhiyao-huihuxi-jiuneng-zuomingxiang | /zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1.tar.gz/zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1/ZhiyaoHuihuxiJiunengZuomingxiang/js/plugins/hypothesis.js | hypothesis.js |
EPUBJS.reader.search = {};
// Search Server -- https://github.com/futurepress/epubjs-search
EPUBJS.reader.search.SERVER = "https://pacific-cliffs-3579.herokuapp.com";
EPUBJS.reader.search.request = function(q, callback) {
var fetch = $.ajax({
dataType: "json",
url: EPUBJS.reader.search.SERVER + "/search?q=" + encodeURIComponent(q)
});
fetch.fail(function(err) {
console.error(err);
});
fetch.done(function(results) {
callback(results);
});
};
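/*
	Illustrative usage (not executed here; the exact result fields depend on
	the search server, see the controller below for the ones it relies on):

	EPUBJS.reader.search.request("whale", function(data) {
		data.results.forEach(function(result) {
			console.log(result.title, result.href, result.cfi);
		});
	});
*/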
EPUBJS.reader.plugins.SearchController = function(Book) {
var reader = this;
var $searchBox = $("#searchBox"),
$searchResults = $("#searchResults"),
$searchView = $("#searchView"),
iframeDoc;
var searchShown = false;
var onShow = function() {
query();
searchShown = true;
$searchView.addClass("shown");
};
var onHide = function() {
searchShown = false;
$searchView.removeClass("shown");
};
var query = function() {
var q = $searchBox.val();
if(q == '') {
return;
}
$searchResults.empty();
$searchResults.append("<li><p>Searching...</p></li>");
EPUBJS.reader.search.request(q, function(data) {
var results = data.results;
$searchResults.empty();
if(iframeDoc) {
$(iframeDoc).find('body').unhighlight();
}
if(results.length == 0) {
$searchResults.append("<li><p>No Results Found</p></li>");
return;
}
iframeDoc = $("#viewer iframe")[0].contentDocument;
$(iframeDoc).find('body').highlight(q, { element: 'span' });
results.forEach(function(result) {
var $li = $("<li></li>");
var $item = $("<a href='"+result.href+"' data-cfi='"+result.cfi+"'><span>"+result.title+"</span><p>"+result.highlight+"</p></a>");
$item.on("click", function(e) {
var $this = $(this),
cfi = $this.data("cfi");
e.preventDefault();
Book.gotoCfi(cfi+"/1:0");
Book.on("renderer:chapterDisplayed", function() {
iframeDoc = $("#viewer iframe")[0].contentDocument;
$(iframeDoc).find('body').highlight(q, { element: 'span' });
})
});
$li.append($item);
$searchResults.append($li);
});
});
};
$searchBox.on("search", function(e) {
var q = $searchBox.val();
//-- SearchBox is empty or cleared
if(q == '') {
$searchResults.empty();
if(reader.SidebarController.getActivePanel() == "Search") {
reader.SidebarController.changePanelTo("Toc");
}
$(iframeDoc).find('body').unhighlight();
iframeDoc = false;
return;
}
reader.SidebarController.changePanelTo("Search");
e.preventDefault();
});
return {
"show" : onShow,
"hide" : onHide
};
}; | zhiyao-huihuxi-jiuneng-zuomingxiang | /zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1.tar.gz/zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1/ZhiyaoHuihuxiJiunengZuomingxiang/js/plugins/search.js | search.js |
!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;b="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this,b.JSZip=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);var j=new Error("Cannot find module '"+g+"'");throw j.code="MODULE_NOT_FOUND",j}var k=c[g]={exports:{}};b[g][0].call(k.exports,function(a){var c=b[g][1][a];return e(c?c:a)},k,k.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){"use strict";var d=a("./utils"),e=a("./support"),f="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";c.encode=function(a){for(var b,c,e,g,h,i,j,k=[],l=0,m=a.length,n=m,o="string"!==d.getTypeOf(a);l<a.length;)n=m-l,o?(b=a[l++],c=l<m?a[l++]:0,e=l<m?a[l++]:0):(b=a.charCodeAt(l++),c=l<m?a.charCodeAt(l++):0,e=l<m?a.charCodeAt(l++):0),g=b>>2,h=(3&b)<<4|c>>4,i=n>1?(15&c)<<2|e>>6:64,j=n>2?63&e:64,k.push(f.charAt(g)+f.charAt(h)+f.charAt(i)+f.charAt(j));return k.join("")},c.decode=function(a){var b,c,d,g,h,i,j,k=0,l=0,m="data:";if(a.substr(0,m.length)===m)throw new Error("Invalid base64 input, it looks like a data url.");a=a.replace(/[^A-Za-z0-9\+\/\=]/g,"");var n=3*a.length/4;if(a.charAt(a.length-1)===f.charAt(64)&&n--,a.charAt(a.length-2)===f.charAt(64)&&n--,n%1!==0)throw new Error("Invalid base64 input, bad content length.");var o;for(o=e.uint8array?new Uint8Array(0|n):new Array(0|n);k<a.length;)g=f.indexOf(a.charAt(k++)),h=f.indexOf(a.charAt(k++)),i=f.indexOf(a.charAt(k++)),j=f.indexOf(a.charAt(k++)),b=g<<2|h>>4,c=(15&h)<<4|i>>2,d=(3&i)<<6|j,o[l++]=b,64!==i&&(o[l++]=c),64!==j&&(o[l++]=d);return o}},{"./support":30,"./utils":32}],2:[function(a,b,c){"use strict";function d(a,b,c,d,e){this.compressedSize=a,this.uncompressedSize=b,this.crc32=c,this.compression=d,this.compressedContent=e}var e=a("./external"),f=a("./stream/DataWorker"),g=a("./stream/DataLengthProbe"),h=a("./stream/Crc32Probe"),g=a("./stream/DataLengthProbe");d.prototype={getContentWorker:function(){var a=new f(e.Promise.resolve(this.compressedContent)).pipe(this.compression.uncompressWorker()).pipe(new g("data_length")),b=this;return a.on("end",function(){if(this.streamInfo.data_length!==b.uncompressedSize)throw new Error("Bug : uncompressed data size mismatch")}),a},getCompressedWorker:function(){return new f(e.Promise.resolve(this.compressedContent)).withStreamInfo("compressedSize",this.compressedSize).withStreamInfo("uncompressedSize",this.uncompressedSize).withStreamInfo("crc32",this.crc32).withStreamInfo("compression",this.compression)}},d.createWorkerFrom=function(a,b,c){return a.pipe(new h).pipe(new g("uncompressedSize")).pipe(b.compressWorker(c)).pipe(new g("compressedSize")).withStreamInfo("compression",b)},b.exports=d},{"./external":6,"./stream/Crc32Probe":25,"./stream/DataLengthProbe":26,"./stream/DataWorker":27}],3:[function(a,b,c){"use strict";var d=a("./stream/GenericWorker");c.STORE={magic:"\0\0",compressWorker:function(a){return new d("STORE compression")},uncompressWorker:function(){return new d("STORE decompression")}},c.DEFLATE=a("./flate")},{"./flate":7,"./stream/GenericWorker":28}],4:[function(a,b,c){"use strict";function d(){for(var a,b=[],c=0;c<256;c++){a=c;for(var d=0;d<8;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function e(a,b,c,d){var 
e=h,f=d+c;a^=-1;for(var g=d;g<f;g++)a=a>>>8^e[255&(a^b[g])];return a^-1}function f(a,b,c,d){var e=h,f=d+c;a^=-1;for(var g=d;g<f;g++)a=a>>>8^e[255&(a^b.charCodeAt(g))];return a^-1}var g=a("./utils"),h=d();b.exports=function(a,b){if("undefined"==typeof a||!a.length)return 0;var c="string"!==g.getTypeOf(a);return c?e(0|b,a,a.length,0):f(0|b,a,a.length,0)}},{"./utils":32}],5:[function(a,b,c){"use strict";c.base64=!1,c.binary=!1,c.dir=!1,c.createFolders=!0,c.date=null,c.compression=null,c.compressionOptions=null,c.comment=null,c.unixPermissions=null,c.dosPermissions=null},{}],6:[function(a,b,c){"use strict";var d=null;d="undefined"!=typeof Promise?Promise:a("lie"),b.exports={Promise:d}},{lie:58}],7:[function(a,b,c){"use strict";function d(a,b){h.call(this,"FlateWorker/"+a),this._pako=null,this._pakoAction=a,this._pakoOptions=b,this.meta={}}var e="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Uint32Array,f=a("pako"),g=a("./utils"),h=a("./stream/GenericWorker"),i=e?"uint8array":"array";c.magic="\b\0",g.inherits(d,h),d.prototype.processChunk=function(a){this.meta=a.meta,null===this._pako&&this._createPako(),this._pako.push(g.transformTo(i,a.data),!1)},d.prototype.flush=function(){h.prototype.flush.call(this),null===this._pako&&this._createPako(),this._pako.push([],!0)},d.prototype.cleanUp=function(){h.prototype.cleanUp.call(this),this._pako=null},d.prototype._createPako=function(){this._pako=new f[this._pakoAction]({raw:!0,level:this._pakoOptions.level||-1});var a=this;this._pako.onData=function(b){a.push({data:b,meta:a.meta})}},c.compressWorker=function(a){return new d("Deflate",a)},c.uncompressWorker=function(){return new d("Inflate",{})}},{"./stream/GenericWorker":28,"./utils":32,pako:59}],8:[function(a,b,c){"use strict";function d(a,b,c,d){f.call(this,"ZipFileWorker"),this.bytesWritten=0,this.zipComment=b,this.zipPlatform=c,this.encodeFileName=d,this.streamFiles=a,this.accumulate=!1,this.contentBuffer=[],this.dirRecords=[],this.currentSourceOffset=0,this.entriesCount=0,this.currentFile=null,this._sources=[]}var e=a("../utils"),f=a("../stream/GenericWorker"),g=a("../utf8"),h=a("../crc32"),i=a("../signature"),j=function(a,b){var c,d="";for(c=0;c<b;c++)d+=String.fromCharCode(255&a),a>>>=8;return d},k=function(a,b){var c=a;return a||(c=b?16893:33204),(65535&c)<<16},l=function(a,b){return 63&(a||0)},m=function(a,b,c,d,f,m){var n,o,p=a.file,q=a.compression,r=m!==g.utf8encode,s=e.transformTo("string",m(p.name)),t=e.transformTo("string",g.utf8encode(p.name)),u=p.comment,v=e.transformTo("string",m(u)),w=e.transformTo("string",g.utf8encode(u)),x=t.length!==p.name.length,y=w.length!==u.length,z="",A="",B="",C=p.dir,D=p.date,E={crc32:0,compressedSize:0,uncompressedSize:0};b&&!c||(E.crc32=a.crc32,E.compressedSize=a.compressedSize,E.uncompressedSize=a.uncompressedSize);var F=0;b&&(F|=8),r||!x&&!y||(F|=2048);var G=0,H=0;C&&(G|=16),"UNIX"===f?(H=798,G|=k(p.unixPermissions,C)):(H=20,G|=l(p.dosPermissions,C)),n=D.getUTCHours(),n<<=6,n|=D.getUTCMinutes(),n<<=5,n|=D.getUTCSeconds()/2,o=D.getUTCFullYear()-1980,o<<=4,o|=D.getUTCMonth()+1,o<<=5,o|=D.getUTCDate(),x&&(A=j(1,1)+j(h(s),4)+t,z+="up"+j(A.length,2)+A),y&&(B=j(1,1)+j(h(v),4)+w,z+="uc"+j(B.length,2)+B);var I="";I+="\n\0",I+=j(F,2),I+=q.magic,I+=j(n,2),I+=j(o,2),I+=j(E.crc32,4),I+=j(E.compressedSize,4),I+=j(E.uncompressedSize,4),I+=j(s.length,2),I+=j(z.length,2);var 
J=i.LOCAL_FILE_HEADER+I+s+z,K=i.CENTRAL_FILE_HEADER+j(H,2)+I+j(v.length,2)+"\0\0\0\0"+j(G,4)+j(d,4)+s+z+v;return{fileRecord:J,dirRecord:K}},n=function(a,b,c,d,f){var g="",h=e.transformTo("string",f(d));return g=i.CENTRAL_DIRECTORY_END+"\0\0\0\0"+j(a,2)+j(a,2)+j(b,4)+j(c,4)+j(h.length,2)+h},o=function(a){var b="";return b=i.DATA_DESCRIPTOR+j(a.crc32,4)+j(a.compressedSize,4)+j(a.uncompressedSize,4)};e.inherits(d,f),d.prototype.push=function(a){var b=a.meta.percent||0,c=this.entriesCount,d=this._sources.length;this.accumulate?this.contentBuffer.push(a):(this.bytesWritten+=a.data.length,f.prototype.push.call(this,{data:a.data,meta:{currentFile:this.currentFile,percent:c?(b+100*(c-d-1))/c:100}}))},d.prototype.openedSource=function(a){this.currentSourceOffset=this.bytesWritten,this.currentFile=a.file.name;var b=this.streamFiles&&!a.file.dir;if(b){var c=m(a,b,!1,this.currentSourceOffset,this.zipPlatform,this.encodeFileName);this.push({data:c.fileRecord,meta:{percent:0}})}else this.accumulate=!0},d.prototype.closedSource=function(a){this.accumulate=!1;var b=this.streamFiles&&!a.file.dir,c=m(a,b,!0,this.currentSourceOffset,this.zipPlatform,this.encodeFileName);if(this.dirRecords.push(c.dirRecord),b)this.push({data:o(a),meta:{percent:100}});else for(this.push({data:c.fileRecord,meta:{percent:0}});this.contentBuffer.length;)this.push(this.contentBuffer.shift());this.currentFile=null},d.prototype.flush=function(){for(var a=this.bytesWritten,b=0;b<this.dirRecords.length;b++)this.push({data:this.dirRecords[b],meta:{percent:100}});var c=this.bytesWritten-a,d=n(this.dirRecords.length,c,a,this.zipComment,this.encodeFileName);this.push({data:d,meta:{percent:100}})},d.prototype.prepareNextSource=function(){this.previous=this._sources.shift(),this.openedSource(this.previous.streamInfo),this.isPaused?this.previous.pause():this.previous.resume()},d.prototype.registerPrevious=function(a){this._sources.push(a);var b=this;return a.on("data",function(a){b.processChunk(a)}),a.on("end",function(){b.closedSource(b.previous.streamInfo),b._sources.length?b.prepareNextSource():b.end()}),a.on("error",function(a){b.error(a)}),this},d.prototype.resume=function(){return!!f.prototype.resume.call(this)&&(!this.previous&&this._sources.length?(this.prepareNextSource(),!0):this.previous||this._sources.length||this.generatedError?void 0:(this.end(),!0))},d.prototype.error=function(a){var b=this._sources;if(!f.prototype.error.call(this,a))return!1;for(var c=0;c<b.length;c++)try{b[c].error(a)}catch(a){}return!0},d.prototype.lock=function(){f.prototype.lock.call(this);for(var a=this._sources,b=0;b<a.length;b++)a[b].lock()},b.exports=d},{"../crc32":4,"../signature":23,"../stream/GenericWorker":28,"../utf8":31,"../utils":32}],9:[function(a,b,c){"use strict";var d=a("../compressions"),e=a("./ZipFileWorker"),f=function(a,b){var c=a||b,e=d[c];if(!e)throw new Error(c+" is not a valid compression method !");return e};c.generateWorker=function(a,b,c){var d=new e(b.streamFiles,c,b.platform,b.encodeFileName),g=0;try{a.forEach(function(a,c){g++;var e=f(c.options.compression,b.compression),h=c.options.compressionOptions||b.compressionOptions||{},i=c.dir,j=c.date;c._compressWorker(e,h).withStreamInfo("file",{name:a,dir:i,date:j,comment:c.comment||"",unixPermissions:c.unixPermissions,dosPermissions:c.dosPermissions}).pipe(d)}),d.entriesCount=g}catch(h){d.error(h)}return d}},{"../compressions":3,"./ZipFileWorker":8}],10:[function(a,b,c){"use strict";function d(){if(!(this instanceof d))return new d;if(arguments.length)throw new Error("The 
constructor with parameters has been removed in JSZip 3.0, please check the upgrade guide.");this.files={},this.comment=null,this.root="",this.clone=function(){var a=new d;for(var b in this)"function"!=typeof this[b]&&(a[b]=this[b]);return a}}d.prototype=a("./object"),d.prototype.loadAsync=a("./load"),d.support=a("./support"),d.defaults=a("./defaults"),d.version="3.1.5",d.loadAsync=function(a,b){return(new d).loadAsync(a,b)},d.external=a("./external"),b.exports=d},{"./defaults":5,"./external":6,"./load":11,"./object":15,"./support":30}],11:[function(a,b,c){"use strict";function d(a){return new f.Promise(function(b,c){var d=a.decompressed.getContentWorker().pipe(new i);d.on("error",function(a){c(a)}).on("end",function(){d.streamInfo.crc32!==a.decompressed.crc32?c(new Error("Corrupted zip : CRC32 mismatch")):b()}).resume()})}var e=a("./utils"),f=a("./external"),g=a("./utf8"),e=a("./utils"),h=a("./zipEntries"),i=a("./stream/Crc32Probe"),j=a("./nodejsUtils");b.exports=function(a,b){var c=this;return b=e.extend(b||{},{base64:!1,checkCRC32:!1,optimizedBinaryString:!1,createFolders:!1,decodeFileName:g.utf8decode}),j.isNode&&j.isStream(a)?f.Promise.reject(new Error("JSZip can't accept a stream when loading a zip file.")):e.prepareContent("the loaded zip file",a,!0,b.optimizedBinaryString,b.base64).then(function(a){var c=new h(b);return c.load(a),c}).then(function(a){var c=[f.Promise.resolve(a)],e=a.files;if(b.checkCRC32)for(var g=0;g<e.length;g++)c.push(d(e[g]));return f.Promise.all(c)}).then(function(a){for(var d=a.shift(),e=d.files,f=0;f<e.length;f++){var g=e[f];c.file(g.fileNameStr,g.decompressed,{binary:!0,optimizedBinaryString:!0,date:g.date,dir:g.dir,comment:g.fileCommentStr.length?g.fileCommentStr:null,unixPermissions:g.unixPermissions,dosPermissions:g.dosPermissions,createFolders:b.createFolders})}return d.zipComment.length&&(c.comment=d.zipComment),c})}},{"./external":6,"./nodejsUtils":14,"./stream/Crc32Probe":25,"./utf8":31,"./utils":32,"./zipEntries":33}],12:[function(a,b,c){"use strict";function d(a,b){f.call(this,"Nodejs stream input adapter for "+a),this._upstreamEnded=!1,this._bindStream(b)}var e=a("../utils"),f=a("../stream/GenericWorker");e.inherits(d,f),d.prototype._bindStream=function(a){var b=this;this._stream=a,a.pause(),a.on("data",function(a){b.push({data:a,meta:{percent:0}})}).on("error",function(a){b.isPaused?this.generatedError=a:b.error(a)}).on("end",function(){b.isPaused?b._upstreamEnded=!0:b.end()})},d.prototype.pause=function(){return!!f.prototype.pause.call(this)&&(this._stream.pause(),!0)},d.prototype.resume=function(){return!!f.prototype.resume.call(this)&&(this._upstreamEnded?this.end():this._stream.resume(),!0)},b.exports=d},{"../stream/GenericWorker":28,"../utils":32}],13:[function(a,b,c){"use strict";function d(a,b,c){e.call(this,b),this._helper=a;var d=this;a.on("data",function(a,b){d.push(a)||d._helper.pause(),c&&c(b)}).on("error",function(a){d.emit("error",a)}).on("end",function(){d.push(null)})}var e=a("readable-stream").Readable,f=a("../utils");f.inherits(d,e),d.prototype._read=function(){this._helper.resume()},b.exports=d},{"../utils":32,"readable-stream":16}],14:[function(a,b,c){"use strict";b.exports={isNode:"undefined"!=typeof Buffer,newBufferFrom:function(a,b){return new Buffer(a,b)},allocBuffer:function(a){return Buffer.alloc?Buffer.alloc(a):new Buffer(a)},isBuffer:function(a){return Buffer.isBuffer(a)},isStream:function(a){return a&&"function"==typeof a.on&&"function"==typeof a.pause&&"function"==typeof a.resume}}},{}],15:[function(a,b,c){"use 
strict";function d(a){return"[object RegExp]"===Object.prototype.toString.call(a)}var e=a("./utf8"),f=a("./utils"),g=a("./stream/GenericWorker"),h=a("./stream/StreamHelper"),i=a("./defaults"),j=a("./compressedObject"),k=a("./zipObject"),l=a("./generate"),m=a("./nodejsUtils"),n=a("./nodejs/NodejsStreamInputAdapter"),o=function(a,b,c){var d,e=f.getTypeOf(b),h=f.extend(c||{},i);h.date=h.date||new Date,null!==h.compression&&(h.compression=h.compression.toUpperCase()),"string"==typeof h.unixPermissions&&(h.unixPermissions=parseInt(h.unixPermissions,8)),h.unixPermissions&&16384&h.unixPermissions&&(h.dir=!0),h.dosPermissions&&16&h.dosPermissions&&(h.dir=!0),h.dir&&(a=q(a)),h.createFolders&&(d=p(a))&&r.call(this,d,!0);var l="string"===e&&h.binary===!1&&h.base64===!1;c&&"undefined"!=typeof c.binary||(h.binary=!l);var o=b instanceof j&&0===b.uncompressedSize;(o||h.dir||!b||0===b.length)&&(h.base64=!1,h.binary=!0,b="",h.compression="STORE",e="string");var s=null;s=b instanceof j||b instanceof g?b:m.isNode&&m.isStream(b)?new n(a,b):f.prepareContent(a,b,h.binary,h.optimizedBinaryString,h.base64);var t=new k(a,s,h);this.files[a]=t},p=function(a){"/"===a.slice(-1)&&(a=a.substring(0,a.length-1));var b=a.lastIndexOf("/");return b>0?a.substring(0,b):""},q=function(a){return"/"!==a.slice(-1)&&(a+="/"),a},r=function(a,b){return b="undefined"!=typeof b?b:i.createFolders,a=q(a),this.files[a]||o.call(this,a,null,{dir:!0,createFolders:b}),this.files[a]},s={load:function(){throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide.")},forEach:function(a){var b,c,d;for(b in this.files)this.files.hasOwnProperty(b)&&(d=this.files[b],c=b.slice(this.root.length,b.length),c&&b.slice(0,this.root.length)===this.root&&a(c,d))},filter:function(a){var b=[];return this.forEach(function(c,d){a(c,d)&&b.push(d)}),b},file:function(a,b,c){if(1===arguments.length){if(d(a)){var e=a;return this.filter(function(a,b){return!b.dir&&e.test(a)})}var f=this.files[this.root+a];return f&&!f.dir?f:null}return a=this.root+a,o.call(this,a,b,c),this},folder:function(a){if(!a)return this;if(d(a))return this.filter(function(b,c){return c.dir&&a.test(b)});var b=this.root+a,c=r.call(this,b),e=this.clone();return e.root=c.name,e},remove:function(a){a=this.root+a;var b=this.files[a];if(b||("/"!==a.slice(-1)&&(a+="/"),b=this.files[a]),b&&!b.dir)delete this.files[a];else for(var c=this.filter(function(b,c){return c.name.slice(0,a.length)===a}),d=0;d<c.length;d++)delete this.files[c[d].name];return this},generate:function(a){throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide.")},generateInternalStream:function(a){var b,c={};try{if(c=f.extend(a||{},{streamFiles:!1,compression:"STORE",compressionOptions:null,type:"",platform:"DOS",comment:null,mimeType:"application/zip",encodeFileName:e.utf8encode}),c.type=c.type.toLowerCase(),c.compression=c.compression.toUpperCase(),"binarystring"===c.type&&(c.type="string"),!c.type)throw new Error("No output type specified.");f.checkSupport(c.type),"darwin"!==c.platform&&"freebsd"!==c.platform&&"linux"!==c.platform&&"sunos"!==c.platform||(c.platform="UNIX"),"win32"===c.platform&&(c.platform="DOS");var d=c.comment||this.comment||"";b=l.generateWorker(this,c,d)}catch(i){b=new g("error"),b.error(i)}return new h(b,c.type||"string",c.mimeType)},generateAsync:function(a,b){return this.generateInternalStream(a).accumulate(b)},generateNodeStream:function(a,b){return 
a=a||{},a.type||(a.type="nodebuffer"),this.generateInternalStream(a).toNodejsStream(b)}};b.exports=s},{"./compressedObject":2,"./defaults":5,"./generate":9,"./nodejs/NodejsStreamInputAdapter":12,"./nodejsUtils":14,"./stream/GenericWorker":28,"./stream/StreamHelper":29,"./utf8":31,"./utils":32,"./zipObject":35}],16:[function(a,b,c){b.exports=a("stream")},{stream:void 0}],17:[function(a,b,c){"use strict";function d(a){e.call(this,a);for(var b=0;b<this.data.length;b++)a[b]=255&a[b]}var e=a("./DataReader"),f=a("../utils");f.inherits(d,e),d.prototype.byteAt=function(a){return this.data[this.zero+a]},d.prototype.lastIndexOfSignature=function(a){for(var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.length-4;f>=0;--f)if(this.data[f]===b&&this.data[f+1]===c&&this.data[f+2]===d&&this.data[f+3]===e)return f-this.zero;return-1},d.prototype.readAndCheckSignature=function(a){var b=a.charCodeAt(0),c=a.charCodeAt(1),d=a.charCodeAt(2),e=a.charCodeAt(3),f=this.readData(4);return b===f[0]&&c===f[1]&&d===f[2]&&e===f[3]},d.prototype.readData=function(a){if(this.checkOffset(a),0===a)return[];var b=this.data.slice(this.zero+this.index,this.zero+this.index+a);return this.index+=a,b},b.exports=d},{"../utils":32,"./DataReader":18}],18:[function(a,b,c){"use strict";function d(a){this.data=a,this.length=a.length,this.index=0,this.zero=0}var e=a("../utils");d.prototype={checkOffset:function(a){this.checkIndex(this.index+a)},checkIndex:function(a){if(this.length<this.zero+a||a<0)throw new Error("End of data reached (data length = "+this.length+", asked index = "+a+"). Corrupted zip ?")},setIndex:function(a){this.checkIndex(a),this.index=a},skip:function(a){this.setIndex(this.index+a)},byteAt:function(a){},readInt:function(a){var b,c=0;for(this.checkOffset(a),b=this.index+a-1;b>=this.index;b--)c=(c<<8)+this.byteAt(b);return this.index+=a,c},readString:function(a){return e.transformTo("string",this.readData(a))},readData:function(a){},lastIndexOfSignature:function(a){},readAndCheckSignature:function(a){},readDate:function(){var a=this.readInt(4);return new Date(Date.UTC((a>>25&127)+1980,(a>>21&15)-1,a>>16&31,a>>11&31,a>>5&63,(31&a)<<1))}},b.exports=d},{"../utils":32}],19:[function(a,b,c){"use strict";function d(a){e.call(this,a)}var e=a("./Uint8ArrayReader"),f=a("../utils");f.inherits(d,e),d.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.zero+this.index,this.zero+this.index+a);return this.index+=a,b},b.exports=d},{"../utils":32,"./Uint8ArrayReader":21}],20:[function(a,b,c){"use strict";function d(a){e.call(this,a)}var e=a("./DataReader"),f=a("../utils");f.inherits(d,e),d.prototype.byteAt=function(a){return this.data.charCodeAt(this.zero+a)},d.prototype.lastIndexOfSignature=function(a){return this.data.lastIndexOf(a)-this.zero},d.prototype.readAndCheckSignature=function(a){var b=this.readData(4);return a===b},d.prototype.readData=function(a){this.checkOffset(a);var b=this.data.slice(this.zero+this.index,this.zero+this.index+a);return this.index+=a,b},b.exports=d},{"../utils":32,"./DataReader":18}],21:[function(a,b,c){"use strict";function d(a){e.call(this,a)}var e=a("./ArrayReader"),f=a("../utils");f.inherits(d,e),d.prototype.readData=function(a){if(this.checkOffset(a),0===a)return new Uint8Array(0);var b=this.data.subarray(this.zero+this.index,this.zero+this.index+a);return this.index+=a,b},b.exports=d},{"../utils":32,"./ArrayReader":17}],22:[function(a,b,c){"use strict";var 
d=a("../utils"),e=a("../support"),f=a("./ArrayReader"),g=a("./StringReader"),h=a("./NodeBufferReader"),i=a("./Uint8ArrayReader");b.exports=function(a){var b=d.getTypeOf(a);return d.checkSupport(b),"string"!==b||e.uint8array?"nodebuffer"===b?new h(a):e.uint8array?new i(d.transformTo("uint8array",a)):new f(d.transformTo("array",a)):new g(a)}},{"../support":30,"../utils":32,"./ArrayReader":17,"./NodeBufferReader":19,"./StringReader":20,"./Uint8ArrayReader":21}],23:[function(a,b,c){"use strict";c.LOCAL_FILE_HEADER="PK",c.CENTRAL_FILE_HEADER="PK",c.CENTRAL_DIRECTORY_END="PK",c.ZIP64_CENTRAL_DIRECTORY_LOCATOR="PK",c.ZIP64_CENTRAL_DIRECTORY_END="PK",c.DATA_DESCRIPTOR="PK\b"},{}],24:[function(a,b,c){"use strict";function d(a){e.call(this,"ConvertWorker to "+a),this.destType=a}var e=a("./GenericWorker"),f=a("../utils");f.inherits(d,e),d.prototype.processChunk=function(a){this.push({data:f.transformTo(this.destType,a.data),meta:a.meta})},b.exports=d},{"../utils":32,"./GenericWorker":28}],25:[function(a,b,c){"use strict";function d(){e.call(this,"Crc32Probe"),this.withStreamInfo("crc32",0)}var e=a("./GenericWorker"),f=a("../crc32"),g=a("../utils");g.inherits(d,e),d.prototype.processChunk=function(a){this.streamInfo.crc32=f(a.data,this.streamInfo.crc32||0),this.push(a)},b.exports=d},{"../crc32":4,"../utils":32,"./GenericWorker":28}],26:[function(a,b,c){"use strict";function d(a){f.call(this,"DataLengthProbe for "+a),this.propName=a,this.withStreamInfo(a,0)}var e=a("../utils"),f=a("./GenericWorker");e.inherits(d,f),d.prototype.processChunk=function(a){if(a){var b=this.streamInfo[this.propName]||0;this.streamInfo[this.propName]=b+a.data.length}f.prototype.processChunk.call(this,a)},b.exports=d},{"../utils":32,"./GenericWorker":28}],27:[function(a,b,c){"use strict";function d(a){f.call(this,"DataWorker");var b=this;this.dataIsReady=!1,this.index=0,this.max=0,this.data=null,this.type="",this._tickScheduled=!1,a.then(function(a){b.dataIsReady=!0,b.data=a,b.max=a&&a.length||0,b.type=e.getTypeOf(a),b.isPaused||b._tickAndRepeat()},function(a){b.error(a)})}var e=a("../utils"),f=a("./GenericWorker"),g=16384;e.inherits(d,f),d.prototype.cleanUp=function(){f.prototype.cleanUp.call(this),this.data=null},d.prototype.resume=function(){return!!f.prototype.resume.call(this)&&(!this._tickScheduled&&this.dataIsReady&&(this._tickScheduled=!0,e.delay(this._tickAndRepeat,[],this)),!0)},d.prototype._tickAndRepeat=function(){this._tickScheduled=!1,this.isPaused||this.isFinished||(this._tick(),this.isFinished||(e.delay(this._tickAndRepeat,[],this),this._tickScheduled=!0))},d.prototype._tick=function(){if(this.isPaused||this.isFinished)return!1;var a=g,b=null,c=Math.min(this.max,this.index+a);if(this.index>=this.max)return this.end();switch(this.type){case"string":b=this.data.substring(this.index,c);break;case"uint8array":b=this.data.subarray(this.index,c);break;case"array":case"nodebuffer":b=this.data.slice(this.index,c)}return this.index=c,this.push({data:b,meta:{percent:this.max?this.index/this.max*100:0}})},b.exports=d},{"../utils":32,"./GenericWorker":28}],28:[function(a,b,c){"use strict";function 
d(a){this.name=a||"default",this.streamInfo={},this.generatedError=null,this.extraStreamInfo={},this.isPaused=!0,this.isFinished=!1,this.isLocked=!1,this._listeners={data:[],end:[],error:[]},this.previous=null}d.prototype={push:function(a){this.emit("data",a)},end:function(){if(this.isFinished)return!1;this.flush();try{this.emit("end"),this.cleanUp(),this.isFinished=!0}catch(a){this.emit("error",a)}return!0},error:function(a){return!this.isFinished&&(this.isPaused?this.generatedError=a:(this.isFinished=!0,this.emit("error",a),this.previous&&this.previous.error(a),this.cleanUp()),!0)},on:function(a,b){return this._listeners[a].push(b),this},cleanUp:function(){this.streamInfo=this.generatedError=this.extraStreamInfo=null,this._listeners=[]},emit:function(a,b){if(this._listeners[a])for(var c=0;c<this._listeners[a].length;c++)this._listeners[a][c].call(this,b)},pipe:function(a){return a.registerPrevious(this)},registerPrevious:function(a){if(this.isLocked)throw new Error("The stream '"+this+"' has already been used.");this.streamInfo=a.streamInfo,this.mergeStreamInfo(),this.previous=a;var b=this;return a.on("data",function(a){b.processChunk(a)}),a.on("end",function(){b.end()}),a.on("error",function(a){b.error(a)}),this},pause:function(){return!this.isPaused&&!this.isFinished&&(this.isPaused=!0,this.previous&&this.previous.pause(),!0)},resume:function(){if(!this.isPaused||this.isFinished)return!1;this.isPaused=!1;var a=!1;return this.generatedError&&(this.error(this.generatedError),a=!0),this.previous&&this.previous.resume(),!a},flush:function(){},processChunk:function(a){this.push(a)},withStreamInfo:function(a,b){return this.extraStreamInfo[a]=b,this.mergeStreamInfo(),this},mergeStreamInfo:function(){for(var a in this.extraStreamInfo)this.extraStreamInfo.hasOwnProperty(a)&&(this.streamInfo[a]=this.extraStreamInfo[a])},lock:function(){if(this.isLocked)throw new Error("The stream '"+this+"' has already been used.");this.isLocked=!0,this.previous&&this.previous.lock()},toString:function(){var a="Worker "+this.name;return this.previous?this.previous+" -> "+a:a}},b.exports=d},{}],29:[function(a,b,c){"use strict";function d(a,b,c){switch(a){case"blob":return h.newBlob(h.transformTo("arraybuffer",b),c);case"base64":return k.encode(b);default:return h.transformTo(a,b)}}function e(a,b){var c,d=0,e=null,f=0;for(c=0;c<b.length;c++)f+=b[c].length;switch(a){case"string":return b.join("");case"array":return Array.prototype.concat.apply([],b);case"uint8array":for(e=new Uint8Array(f),c=0;c<b.length;c++)e.set(b[c],d),d+=b[c].length;return e;case"nodebuffer":return Buffer.concat(b);default:throw new Error("concat : unsupported type '"+a+"'")}}function f(a,b){return new m.Promise(function(c,f){var g=[],h=a._internalType,i=a._outputType,j=a._mimeType;a.on("data",function(a,c){g.push(a),b&&b(c)}).on("error",function(a){g=[],f(a)}).on("end",function(){try{var a=d(i,e(h,g),j);c(a)}catch(b){f(b)}g=[]}).resume()})}function g(a,b,c){var d=b;switch(b){case"blob":case"arraybuffer":d="uint8array";break;case"base64":d="string"}try{this._internalType=d,this._outputType=b,this._mimeType=c,h.checkSupport(d),this._worker=a.pipe(new i(d)),a.lock()}catch(e){this._worker=new j("error"),this._worker.error(e)}}var h=a("../utils"),i=a("./ConvertWorker"),j=a("./GenericWorker"),k=a("../base64"),l=a("../support"),m=a("../external"),n=null;if(l.nodestream)try{n=a("../nodejs/NodejsStreamOutputAdapter")}catch(o){}g.prototype={accumulate:function(a){return f(this,a)},on:function(a,b){var 
c=this;return"data"===a?this._worker.on(a,function(a){b.call(c,a.data,a.meta)}):this._worker.on(a,function(){h.delay(b,arguments,c)}),this},resume:function(){return h.delay(this._worker.resume,[],this._worker),this},pause:function(){return this._worker.pause(),this},toNodejsStream:function(a){if(h.checkSupport("nodestream"),"nodebuffer"!==this._outputType)throw new Error(this._outputType+" is not supported by this method");return new n(this,{objectMode:"nodebuffer"!==this._outputType},a)}},b.exports=g},{"../base64":1,"../external":6,"../nodejs/NodejsStreamOutputAdapter":13,"../support":30,"../utils":32,"./ConvertWorker":24,"./GenericWorker":28}],30:[function(a,b,c){"use strict";if(c.base64=!0,c.array=!0,c.string=!0,c.arraybuffer="undefined"!=typeof ArrayBuffer&&"undefined"!=typeof Uint8Array,c.nodebuffer="undefined"!=typeof Buffer,c.uint8array="undefined"!=typeof Uint8Array,"undefined"==typeof ArrayBuffer)c.blob=!1;else{var d=new ArrayBuffer(0);try{c.blob=0===new Blob([d],{type:"application/zip"}).size}catch(e){try{var f=self.BlobBuilder||self.WebKitBlobBuilder||self.MozBlobBuilder||self.MSBlobBuilder,g=new f;g.append(d),c.blob=0===g.getBlob("application/zip").size}catch(e){c.blob=!1}}}try{c.nodestream=!!a("readable-stream").Readable}catch(e){c.nodestream=!1}},{"readable-stream":16}],31:[function(a,b,c){"use strict";function d(){i.call(this,"utf-8 decode"),this.leftOver=null}function e(){i.call(this,"utf-8 encode")}for(var f=a("./utils"),g=a("./support"),h=a("./nodejsUtils"),i=a("./stream/GenericWorker"),j=new Array(256),k=0;k<256;k++)j[k]=k>=252?6:k>=248?5:k>=240?4:k>=224?3:k>=192?2:1;j[254]=j[254]=1;var l=function(a){var b,c,d,e,f,h=a.length,i=0;for(e=0;e<h;e++)c=a.charCodeAt(e),55296===(64512&c)&&e+1<h&&(d=a.charCodeAt(e+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),e++)),i+=c<128?1:c<2048?2:c<65536?3:4;for(b=g.uint8array?new Uint8Array(i):new Array(i),f=0,e=0;f<i;e++)c=a.charCodeAt(e),55296===(64512&c)&&e+1<h&&(d=a.charCodeAt(e+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),e++)),c<128?b[f++]=c:c<2048?(b[f++]=192|c>>>6,b[f++]=128|63&c):c<65536?(b[f++]=224|c>>>12,b[f++]=128|c>>>6&63,b[f++]=128|63&c):(b[f++]=240|c>>>18,b[f++]=128|c>>>12&63,b[f++]=128|c>>>6&63,b[f++]=128|63&c);return b},m=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return c<0?b:0===c?b:c+j[a[c]]>b?c:b},n=function(a){var b,c,d,e,g=a.length,h=new Array(2*g);for(c=0,b=0;b<g;)if(d=a[b++],d<128)h[c++]=d;else if(e=j[d],e>4)h[c++]=65533,b+=e-1;else{for(d&=2===e?31:3===e?15:7;e>1&&b<g;)d=d<<6|63&a[b++],e--;e>1?h[c++]=65533:d<65536?h[c++]=d:(d-=65536,h[c++]=55296|d>>10&1023,h[c++]=56320|1023&d)}return h.length!==c&&(h.subarray?h=h.subarray(0,c):h.length=c),f.applyFromCharCode(h)};c.utf8encode=function(a){return g.nodebuffer?h.newBufferFrom(a,"utf-8"):l(a)},c.utf8decode=function(a){return g.nodebuffer?f.transformTo("nodebuffer",a).toString("utf-8"):(a=f.transformTo(g.uint8array?"uint8array":"array",a),n(a))},f.inherits(d,i),d.prototype.processChunk=function(a){var b=f.transformTo(g.uint8array?"uint8array":"array",a.data);if(this.leftOver&&this.leftOver.length){if(g.uint8array){var d=b;b=new Uint8Array(d.length+this.leftOver.length),b.set(this.leftOver,0),b.set(d,this.leftOver.length)}else b=this.leftOver.concat(b);this.leftOver=null}var 
e=m(b),h=b;e!==b.length&&(g.uint8array?(h=b.subarray(0,e),this.leftOver=b.subarray(e,b.length)):(h=b.slice(0,e),this.leftOver=b.slice(e,b.length))),this.push({data:c.utf8decode(h),meta:a.meta})},d.prototype.flush=function(){this.leftOver&&this.leftOver.length&&(this.push({data:c.utf8decode(this.leftOver),meta:{}}),this.leftOver=null)},c.Utf8DecodeWorker=d,f.inherits(e,i),e.prototype.processChunk=function(a){this.push({data:c.utf8encode(a.data),meta:a.meta})},c.Utf8EncodeWorker=e},{"./nodejsUtils":14,"./stream/GenericWorker":28,"./support":30,"./utils":32}],32:[function(a,b,c){"use strict";function d(a){var b=null;return b=i.uint8array?new Uint8Array(a.length):new Array(a.length),f(a,b)}function e(a){return a}function f(a,b){for(var c=0;c<a.length;++c)b[c]=255&a.charCodeAt(c);return b}function g(a){var b=65536,d=c.getTypeOf(a),e=!0;if("uint8array"===d?e=n.applyCanBeUsed.uint8array:"nodebuffer"===d&&(e=n.applyCanBeUsed.nodebuffer),e)for(;b>1;)try{return n.stringifyByChunk(a,d,b)}catch(f){b=Math.floor(b/2)}return n.stringifyByChar(a)}function h(a,b){for(var c=0;c<a.length;c++)b[c]=a[c];
return b}var i=a("./support"),j=a("./base64"),k=a("./nodejsUtils"),l=a("core-js/library/fn/set-immediate"),m=a("./external");c.newBlob=function(a,b){c.checkSupport("blob");try{return new Blob([a],{type:b})}catch(d){try{var e=self.BlobBuilder||self.WebKitBlobBuilder||self.MozBlobBuilder||self.MSBlobBuilder,f=new e;return f.append(a),f.getBlob(b)}catch(d){throw new Error("Bug : can't construct the Blob.")}}};var n={stringifyByChunk:function(a,b,c){var d=[],e=0,f=a.length;if(f<=c)return String.fromCharCode.apply(null,a);for(;e<f;)"array"===b||"nodebuffer"===b?d.push(String.fromCharCode.apply(null,a.slice(e,Math.min(e+c,f)))):d.push(String.fromCharCode.apply(null,a.subarray(e,Math.min(e+c,f)))),e+=c;return d.join("")},stringifyByChar:function(a){for(var b="",c=0;c<a.length;c++)b+=String.fromCharCode(a[c]);return b},applyCanBeUsed:{uint8array:function(){try{return i.uint8array&&1===String.fromCharCode.apply(null,new Uint8Array(1)).length}catch(a){return!1}}(),nodebuffer:function(){try{return i.nodebuffer&&1===String.fromCharCode.apply(null,k.allocBuffer(1)).length}catch(a){return!1}}()}};c.applyFromCharCode=g;var o={};o.string={string:e,array:function(a){return f(a,new Array(a.length))},arraybuffer:function(a){return o.string.uint8array(a).buffer},uint8array:function(a){return f(a,new Uint8Array(a.length))},nodebuffer:function(a){return f(a,k.allocBuffer(a.length))}},o.array={string:g,array:e,arraybuffer:function(a){return new Uint8Array(a).buffer},uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return k.newBufferFrom(a)}},o.arraybuffer={string:function(a){return g(new Uint8Array(a))},array:function(a){return h(new Uint8Array(a),new Array(a.byteLength))},arraybuffer:e,uint8array:function(a){return new Uint8Array(a)},nodebuffer:function(a){return k.newBufferFrom(new Uint8Array(a))}},o.uint8array={string:g,array:function(a){return h(a,new Array(a.length))},arraybuffer:function(a){return a.buffer},uint8array:e,nodebuffer:function(a){return k.newBufferFrom(a)}},o.nodebuffer={string:g,array:function(a){return h(a,new Array(a.length))},arraybuffer:function(a){return o.nodebuffer.uint8array(a).buffer},uint8array:function(a){return h(a,new Uint8Array(a.length))},nodebuffer:e},c.transformTo=function(a,b){if(b||(b=""),!a)return b;c.checkSupport(a);var d=c.getTypeOf(b),e=o[d][a](b);return e},c.getTypeOf=function(a){return"string"==typeof a?"string":"[object Array]"===Object.prototype.toString.call(a)?"array":i.nodebuffer&&k.isBuffer(a)?"nodebuffer":i.uint8array&&a instanceof Uint8Array?"uint8array":i.arraybuffer&&a instanceof ArrayBuffer?"arraybuffer":void 0},c.checkSupport=function(a){var b=i[a.toLowerCase()];if(!b)throw new Error(a+" is not supported by this platform")},c.MAX_VALUE_16BITS=65535,c.MAX_VALUE_32BITS=-1,c.pretty=function(a){var b,c,d="";for(c=0;c<(a||"").length;c++)b=a.charCodeAt(c),d+="\\x"+(b<16?"0":"")+b.toString(16).toUpperCase();return d},c.delay=function(a,b,c){l(function(){a.apply(c||null,b||[])})},c.inherits=function(a,b){var c=function(){};c.prototype=b.prototype,a.prototype=new c},c.extend=function(){var a,b,c={};for(a=0;a<arguments.length;a++)for(b in arguments[a])arguments[a].hasOwnProperty(b)&&"undefined"==typeof c[b]&&(c[b]=arguments[a][b]);return c},c.prepareContent=function(a,b,e,f,g){var h=m.Promise.resolve(b).then(function(a){var b=i.blob&&(a instanceof Blob||["[object File]","[object Blob]"].indexOf(Object.prototype.toString.call(a))!==-1);return b&&"undefined"!=typeof FileReader?new m.Promise(function(b,c){var d=new 
FileReader;d.onload=function(a){b(a.target.result)},d.onerror=function(a){c(a.target.error)},d.readAsArrayBuffer(a)}):a});return h.then(function(b){var h=c.getTypeOf(b);return h?("arraybuffer"===h?b=c.transformTo("uint8array",b):"string"===h&&(g?b=j.decode(b):e&&f!==!0&&(b=d(b))),b):m.Promise.reject(new Error("Can't read the data of '"+a+"'. Is it in a supported JavaScript type (String, Blob, ArrayBuffer, etc) ?"))})}},{"./base64":1,"./external":6,"./nodejsUtils":14,"./support":30,"core-js/library/fn/set-immediate":36}],33:[function(a,b,c){"use strict";function d(a){this.files=[],this.loadOptions=a}var e=a("./reader/readerFor"),f=a("./utils"),g=a("./signature"),h=a("./zipEntry"),i=(a("./utf8"),a("./support"));d.prototype={checkSignature:function(a){if(!this.reader.readAndCheckSignature(a)){this.reader.index-=4;var b=this.reader.readString(4);throw new Error("Corrupted zip or bug: unexpected signature ("+f.pretty(b)+", expected "+f.pretty(a)+")")}},isSignature:function(a,b){var c=this.reader.index;this.reader.setIndex(a);var d=this.reader.readString(4),e=d===b;return this.reader.setIndex(c),e},readBlockEndOfCentral:function(){this.diskNumber=this.reader.readInt(2),this.diskWithCentralDirStart=this.reader.readInt(2),this.centralDirRecordsOnThisDisk=this.reader.readInt(2),this.centralDirRecords=this.reader.readInt(2),this.centralDirSize=this.reader.readInt(4),this.centralDirOffset=this.reader.readInt(4),this.zipCommentLength=this.reader.readInt(2);var a=this.reader.readData(this.zipCommentLength),b=i.uint8array?"uint8array":"array",c=f.transformTo(b,a);this.zipComment=this.loadOptions.decodeFileName(c)},readBlockZip64EndOfCentral:function(){this.zip64EndOfCentralSize=this.reader.readInt(8),this.reader.skip(4),this.diskNumber=this.reader.readInt(4),this.diskWithCentralDirStart=this.reader.readInt(4),this.centralDirRecordsOnThisDisk=this.reader.readInt(8),this.centralDirRecords=this.reader.readInt(8),this.centralDirSize=this.reader.readInt(8),this.centralDirOffset=this.reader.readInt(8),this.zip64ExtensibleData={};for(var a,b,c,d=this.zip64EndOfCentralSize-44,e=0;e<d;)a=this.reader.readInt(2),b=this.reader.readInt(4),c=this.reader.readData(b),this.zip64ExtensibleData[a]={id:a,length:b,value:c}},readBlockZip64EndOfCentralLocator:function(){if(this.diskWithZip64CentralDirStart=this.reader.readInt(4),this.relativeOffsetEndOfZip64CentralDir=this.reader.readInt(8),this.disksCount=this.reader.readInt(4),this.disksCount>1)throw new Error("Multi-volumes zip are not supported")},readLocalFiles:function(){var a,b;for(a=0;a<this.files.length;a++)b=this.files[a],this.reader.setIndex(b.localHeaderOffset),this.checkSignature(g.LOCAL_FILE_HEADER),b.readLocalPart(this.reader),b.handleUTF8(),b.processAttributes()},readCentralDir:function(){var a;for(this.reader.setIndex(this.centralDirOffset);this.reader.readAndCheckSignature(g.CENTRAL_FILE_HEADER);)a=new h({zip64:this.zip64},this.loadOptions),a.readCentralPart(this.reader),this.files.push(a);if(this.centralDirRecords!==this.files.length&&0!==this.centralDirRecords&&0===this.files.length)throw new Error("Corrupted zip or bug: expected "+this.centralDirRecords+" records in central dir, got "+this.files.length)},readEndOfCentral:function(){var a=this.reader.lastIndexOfSignature(g.CENTRAL_DIRECTORY_END);if(a<0){var b=!this.isSignature(0,g.LOCAL_FILE_HEADER);throw b?new Error("Can't find end of central directory : is this a zip file ? 
If it is, see https://stuk.github.io/jszip/documentation/howto/read_zip.html"):new Error("Corrupted zip: can't find end of central directory")}this.reader.setIndex(a);var c=a;if(this.checkSignature(g.CENTRAL_DIRECTORY_END),this.readBlockEndOfCentral(),this.diskNumber===f.MAX_VALUE_16BITS||this.diskWithCentralDirStart===f.MAX_VALUE_16BITS||this.centralDirRecordsOnThisDisk===f.MAX_VALUE_16BITS||this.centralDirRecords===f.MAX_VALUE_16BITS||this.centralDirSize===f.MAX_VALUE_32BITS||this.centralDirOffset===f.MAX_VALUE_32BITS){if(this.zip64=!0,a=this.reader.lastIndexOfSignature(g.ZIP64_CENTRAL_DIRECTORY_LOCATOR),a<0)throw new Error("Corrupted zip: can't find the ZIP64 end of central directory locator");if(this.reader.setIndex(a),this.checkSignature(g.ZIP64_CENTRAL_DIRECTORY_LOCATOR),this.readBlockZip64EndOfCentralLocator(),!this.isSignature(this.relativeOffsetEndOfZip64CentralDir,g.ZIP64_CENTRAL_DIRECTORY_END)&&(this.relativeOffsetEndOfZip64CentralDir=this.reader.lastIndexOfSignature(g.ZIP64_CENTRAL_DIRECTORY_END),this.relativeOffsetEndOfZip64CentralDir<0))throw new Error("Corrupted zip: can't find the ZIP64 end of central directory");this.reader.setIndex(this.relativeOffsetEndOfZip64CentralDir),this.checkSignature(g.ZIP64_CENTRAL_DIRECTORY_END),this.readBlockZip64EndOfCentral()}var d=this.centralDirOffset+this.centralDirSize;this.zip64&&(d+=20,d+=12+this.zip64EndOfCentralSize);var e=c-d;if(e>0)this.isSignature(c,g.CENTRAL_FILE_HEADER)||(this.reader.zero=e);else if(e<0)throw new Error("Corrupted zip: missing "+Math.abs(e)+" bytes.")},prepareReader:function(a){this.reader=e(a)},load:function(a){this.prepareReader(a),this.readEndOfCentral(),this.readCentralDir(),this.readLocalFiles()}},b.exports=d},{"./reader/readerFor":22,"./signature":23,"./support":30,"./utf8":31,"./utils":32,"./zipEntry":34}],34:[function(a,b,c){"use strict";function d(a,b){this.options=a,this.loadOptions=b}var e=a("./reader/readerFor"),f=a("./utils"),g=a("./compressedObject"),h=a("./crc32"),i=a("./utf8"),j=a("./compressions"),k=a("./support"),l=0,m=3,n=function(a){for(var b in j)if(j.hasOwnProperty(b)&&j[b].magic===a)return j[b];return null};d.prototype={isEncrypted:function(){return 1===(1&this.bitFlag)},useUTF8:function(){return 2048===(2048&this.bitFlag)},readLocalPart:function(a){var b,c;if(a.skip(22),this.fileNameLength=a.readInt(2),c=a.readInt(2),this.fileName=a.readData(this.fileNameLength),a.skip(c),this.compressedSize===-1||this.uncompressedSize===-1)throw new Error("Bug or corrupted zip : didn't get enough informations from the central directory (compressedSize === -1 || uncompressedSize === -1)");if(b=n(this.compressionMethod),null===b)throw new Error("Corrupted zip : compression "+f.pretty(this.compressionMethod)+" unknown (inner file : "+f.transformTo("string",this.fileName)+")");this.decompressed=new g(this.compressedSize,this.uncompressedSize,this.crc32,b,a.readData(this.compressedSize))},readCentralPart:function(a){this.versionMadeBy=a.readInt(2),a.skip(2),this.bitFlag=a.readInt(2),this.compressionMethod=a.readString(2),this.date=a.readDate(),this.crc32=a.readInt(4),this.compressedSize=a.readInt(4),this.uncompressedSize=a.readInt(4);var b=a.readInt(2);if(this.extraFieldsLength=a.readInt(2),this.fileCommentLength=a.readInt(2),this.diskNumberStart=a.readInt(2),this.internalFileAttributes=a.readInt(2),this.externalFileAttributes=a.readInt(4),this.localHeaderOffset=a.readInt(4),this.isEncrypted())throw new Error("Encrypted zip are not 
supported");a.skip(b),this.readExtraFields(a),this.parseZIP64ExtraField(a),this.fileComment=a.readData(this.fileCommentLength)},processAttributes:function(){this.unixPermissions=null,this.dosPermissions=null;var a=this.versionMadeBy>>8;this.dir=!!(16&this.externalFileAttributes),a===l&&(this.dosPermissions=63&this.externalFileAttributes),a===m&&(this.unixPermissions=this.externalFileAttributes>>16&65535),this.dir||"/"!==this.fileNameStr.slice(-1)||(this.dir=!0)},parseZIP64ExtraField:function(a){if(this.extraFields[1]){var b=e(this.extraFields[1].value);this.uncompressedSize===f.MAX_VALUE_32BITS&&(this.uncompressedSize=b.readInt(8)),this.compressedSize===f.MAX_VALUE_32BITS&&(this.compressedSize=b.readInt(8)),this.localHeaderOffset===f.MAX_VALUE_32BITS&&(this.localHeaderOffset=b.readInt(8)),this.diskNumberStart===f.MAX_VALUE_32BITS&&(this.diskNumberStart=b.readInt(4))}},readExtraFields:function(a){var b,c,d,e=a.index+this.extraFieldsLength;for(this.extraFields||(this.extraFields={});a.index<e;)b=a.readInt(2),c=a.readInt(2),d=a.readData(c),this.extraFields[b]={id:b,length:c,value:d}},handleUTF8:function(){var a=k.uint8array?"uint8array":"array";if(this.useUTF8())this.fileNameStr=i.utf8decode(this.fileName),this.fileCommentStr=i.utf8decode(this.fileComment);else{var b=this.findExtraFieldUnicodePath();if(null!==b)this.fileNameStr=b;else{var c=f.transformTo(a,this.fileName);this.fileNameStr=this.loadOptions.decodeFileName(c)}var d=this.findExtraFieldUnicodeComment();if(null!==d)this.fileCommentStr=d;else{var e=f.transformTo(a,this.fileComment);this.fileCommentStr=this.loadOptions.decodeFileName(e)}}},findExtraFieldUnicodePath:function(){var a=this.extraFields[28789];if(a){var b=e(a.value);return 1!==b.readInt(1)?null:h(this.fileName)!==b.readInt(4)?null:i.utf8decode(b.readData(a.length-5))}return null},findExtraFieldUnicodeComment:function(){var a=this.extraFields[25461];if(a){var b=e(a.value);return 1!==b.readInt(1)?null:h(this.fileComment)!==b.readInt(4)?null:i.utf8decode(b.readData(a.length-5))}return null}},b.exports=d},{"./compressedObject":2,"./compressions":3,"./crc32":4,"./reader/readerFor":22,"./support":30,"./utf8":31,"./utils":32}],35:[function(a,b,c){"use strict";var d=a("./stream/StreamHelper"),e=a("./stream/DataWorker"),f=a("./utf8"),g=a("./compressedObject"),h=a("./stream/GenericWorker"),i=function(a,b,c){this.name=a,this.dir=c.dir,this.date=c.date,this.comment=c.comment,this.unixPermissions=c.unixPermissions,this.dosPermissions=c.dosPermissions,this._data=b,this._dataBinary=c.binary,this.options={compression:c.compression,compressionOptions:c.compressionOptions}};i.prototype={internalStream:function(a){var b=null,c="string";try{if(!a)throw new Error("No output type specified.");c=a.toLowerCase();var e="string"===c||"text"===c;"binarystring"!==c&&"text"!==c||(c="string"),b=this._decompressWorker();var g=!this._dataBinary;g&&!e&&(b=b.pipe(new f.Utf8EncodeWorker)),!g&&e&&(b=b.pipe(new f.Utf8DecodeWorker))}catch(i){b=new h("error"),b.error(i)}return new d(b,c,"")},async:function(a,b){return this.internalStream(a).accumulate(b)},nodeStream:function(a,b){return this.internalStream(a||"nodebuffer").toNodejsStream(b)},_compressWorker:function(a,b){if(this._data instanceof g&&this._data.compression.magic===a.magic)return this._data.getCompressedWorker();var c=this._decompressWorker();return this._dataBinary||(c=c.pipe(new f.Utf8EncodeWorker)),g.createWorkerFrom(c,a,b)},_decompressWorker:function(){return this._data instanceof g?this._data.getContentWorker():this._data instanceof 
h?this._data:new e(this._data)}};for(var j=["asText","asBinary","asNodeBuffer","asUint8Array","asArrayBuffer"],k=function(){throw new Error("This method has been removed in JSZip 3.0, please check the upgrade guide.")},l=0;l<j.length;l++)i.prototype[j[l]]=k;b.exports=i},{"./compressedObject":2,"./stream/DataWorker":27,"./stream/GenericWorker":28,"./stream/StreamHelper":29,"./utf8":31}],36:[function(a,b,c){a("../modules/web.immediate"),b.exports=a("../modules/_core").setImmediate},{"../modules/_core":40,"../modules/web.immediate":56}],37:[function(a,b,c){b.exports=function(a){if("function"!=typeof a)throw TypeError(a+" is not a function!");return a}},{}],38:[function(a,b,c){var d=a("./_is-object");b.exports=function(a){if(!d(a))throw TypeError(a+" is not an object!");return a}},{"./_is-object":51}],39:[function(a,b,c){var d={}.toString;b.exports=function(a){return d.call(a).slice(8,-1)}},{}],40:[function(a,b,c){var d=b.exports={version:"2.3.0"};"number"==typeof __e&&(__e=d)},{}],41:[function(a,b,c){var d=a("./_a-function");b.exports=function(a,b,c){if(d(a),void 0===b)return a;switch(c){case 1:return function(c){return a.call(b,c)};case 2:return function(c,d){return a.call(b,c,d)};case 3:return function(c,d,e){return a.call(b,c,d,e)}}return function(){return a.apply(b,arguments)}}},{"./_a-function":37}],42:[function(a,b,c){b.exports=!a("./_fails")(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},{"./_fails":45}],43:[function(a,b,c){var d=a("./_is-object"),e=a("./_global").document,f=d(e)&&d(e.createElement);b.exports=function(a){return f?e.createElement(a):{}}},{"./_global":46,"./_is-object":51}],44:[function(a,b,c){var d=a("./_global"),e=a("./_core"),f=a("./_ctx"),g=a("./_hide"),h="prototype",i=function(a,b,c){var j,k,l,m=a&i.F,n=a&i.G,o=a&i.S,p=a&i.P,q=a&i.B,r=a&i.W,s=n?e:e[b]||(e[b]={}),t=s[h],u=n?d:o?d[b]:(d[b]||{})[h];n&&(c=b);for(j in c)k=!m&&u&&void 0!==u[j],k&&j in s||(l=k?u[j]:c[j],s[j]=n&&"function"!=typeof u[j]?c[j]:q&&k?f(l,d):r&&u[j]==l?function(a){var b=function(b,c,d){if(this instanceof a){switch(arguments.length){case 0:return new a;case 1:return new a(b);case 2:return new a(b,c)}return new a(b,c,d)}return a.apply(this,arguments)};return b[h]=a[h],b}(l):p&&"function"==typeof l?f(Function.call,l):l,p&&((s.virtual||(s.virtual={}))[j]=l,a&i.R&&t&&!t[j]&&g(t,j,l)))};i.F=1,i.G=2,i.S=4,i.P=8,i.B=16,i.W=32,i.U=64,i.R=128,b.exports=i},{"./_core":40,"./_ctx":41,"./_global":46,"./_hide":47}],45:[function(a,b,c){b.exports=function(a){try{return!!a()}catch(b){return!0}}},{}],46:[function(a,b,c){var d=b.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=d)},{}],47:[function(a,b,c){var d=a("./_object-dp"),e=a("./_property-desc");b.exports=a("./_descriptors")?function(a,b,c){return d.f(a,b,e(1,c))}:function(a,b,c){return a[b]=c,a}},{"./_descriptors":42,"./_object-dp":52,"./_property-desc":53}],48:[function(a,b,c){b.exports=a("./_global").document&&document.documentElement},{"./_global":46}],49:[function(a,b,c){b.exports=!a("./_descriptors")&&!a("./_fails")(function(){return 7!=Object.defineProperty(a("./_dom-create")("div"),"a",{get:function(){return 7}}).a})},{"./_descriptors":42,"./_dom-create":43,"./_fails":45}],50:[function(a,b,c){b.exports=function(a,b,c){var d=void 0===c;switch(b.length){case 0:return d?a():a.call(c);case 1:return d?a(b[0]):a.call(c,b[0]);case 2:return d?a(b[0],b[1]):a.call(c,b[0],b[1]);case 3:return 
d?a(b[0],b[1],b[2]):a.call(c,b[0],b[1],b[2]);case 4:return d?a(b[0],b[1],b[2],b[3]):a.call(c,b[0],b[1],b[2],b[3])}return a.apply(c,b)}},{}],51:[function(a,b,c){b.exports=function(a){return"object"==typeof a?null!==a:"function"==typeof a}},{}],52:[function(a,b,c){var d=a("./_an-object"),e=a("./_ie8-dom-define"),f=a("./_to-primitive"),g=Object.defineProperty;c.f=a("./_descriptors")?Object.defineProperty:function(a,b,c){if(d(a),b=f(b,!0),d(c),e)try{return g(a,b,c)}catch(h){}if("get"in c||"set"in c)throw TypeError("Accessors not supported!");return"value"in c&&(a[b]=c.value),a}},{"./_an-object":38,"./_descriptors":42,"./_ie8-dom-define":49,"./_to-primitive":55}],53:[function(a,b,c){b.exports=function(a,b){return{enumerable:!(1&a),configurable:!(2&a),writable:!(4&a),value:b}}},{}],54:[function(a,b,c){var d,e,f,g=a("./_ctx"),h=a("./_invoke"),i=a("./_html"),j=a("./_dom-create"),k=a("./_global"),l=k.process,m=k.setImmediate,n=k.clearImmediate,o=k.MessageChannel,p=0,q={},r="onreadystatechange",s=function(){var a=+this;if(q.hasOwnProperty(a)){var b=q[a];delete q[a],b()}},t=function(a){s.call(a.data)};m&&n||(m=function(a){for(var b=[],c=1;arguments.length>c;)b.push(arguments[c++]);return q[++p]=function(){h("function"==typeof a?a:Function(a),b)},d(p),p},n=function(a){delete q[a]},"process"==a("./_cof")(l)?d=function(a){l.nextTick(g(s,a,1))}:o?(e=new o,f=e.port2,e.port1.onmessage=t,d=g(f.postMessage,f,1)):k.addEventListener&&"function"==typeof postMessage&&!k.importScripts?(d=function(a){k.postMessage(a+"","*")},k.addEventListener("message",t,!1)):d=r in j("script")?function(a){i.appendChild(j("script"))[r]=function(){i.removeChild(this),s.call(a)}}:function(a){setTimeout(g(s,a,1),0)}),b.exports={set:m,clear:n}},{"./_cof":39,"./_ctx":41,"./_dom-create":43,"./_global":46,"./_html":48,"./_invoke":50}],55:[function(a,b,c){var d=a("./_is-object");b.exports=function(a,b){if(!d(a))return a;var c,e;if(b&&"function"==typeof(c=a.toString)&&!d(e=c.call(a)))return e;if("function"==typeof(c=a.valueOf)&&!d(e=c.call(a)))return e;if(!b&&"function"==typeof(c=a.toString)&&!d(e=c.call(a)))return e;throw TypeError("Can't convert object to primitive value")}},{"./_is-object":51}],56:[function(a,b,c){var d=a("./_export"),e=a("./_task");d(d.G+d.B,{setImmediate:e.set,clearImmediate:e.clear})},{"./_export":44,"./_task":54}],57:[function(a,b,c){(function(a){"use strict";function c(){k=!0;for(var a,b,c=l.length;c;){for(b=l,l=[],a=-1;++a<c;)b[a]();c=l.length}k=!1}function d(a){1!==l.push(a)||k||e()}var e,f=a.MutationObserver||a.WebKitMutationObserver;if(f){var g=0,h=new f(c),i=a.document.createTextNode("");h.observe(i,{characterData:!0}),e=function(){i.data=g=++g%2}}else if(a.setImmediate||"undefined"==typeof a.MessageChannel)e="document"in a&&"onreadystatechange"in a.document.createElement("script")?function(){var b=a.document.createElement("script");b.onreadystatechange=function(){c(),b.onreadystatechange=null,b.parentNode.removeChild(b),b=null},a.document.documentElement.appendChild(b)}:function(){setTimeout(c,0)};else{var j=new a.MessageChannel;j.port1.onmessage=c,e=function(){j.port2.postMessage(0)}}var k,l=[];b.exports=d}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],58:[function(a,b,c){"use strict";function d(){}function e(a){if("function"!=typeof a)throw new TypeError("resolver must be a function");this.state=s,this.queue=[],this.outcome=void 0,a!==d&&i(this,a)}function f(a,b,c){this.promise=a,"function"==typeof 
b&&(this.onFulfilled=b,this.callFulfilled=this.otherCallFulfilled),"function"==typeof c&&(this.onRejected=c,this.callRejected=this.otherCallRejected)}function g(a,b,c){o(function(){var d;try{d=b(c)}catch(e){return p.reject(a,e)}d===a?p.reject(a,new TypeError("Cannot resolve promise with itself")):p.resolve(a,d)})}function h(a){var b=a&&a.then;if(a&&("object"==typeof a||"function"==typeof a)&&"function"==typeof b)return function(){b.apply(a,arguments)}}function i(a,b){function c(b){f||(f=!0,p.reject(a,b))}function d(b){f||(f=!0,p.resolve(a,b))}function e(){b(d,c)}var f=!1,g=j(e);"error"===g.status&&c(g.value)}function j(a,b){var c={};try{c.value=a(b),c.status="success"}catch(d){c.status="error",c.value=d}return c}function k(a){return a instanceof this?a:p.resolve(new this(d),a)}function l(a){var b=new this(d);return p.reject(b,a)}function m(a){function b(a,b){function d(a){g[b]=a,++h!==e||f||(f=!0,p.resolve(j,g))}c.resolve(a).then(d,function(a){f||(f=!0,p.reject(j,a))})}var c=this;if("[object Array]"!==Object.prototype.toString.call(a))return this.reject(new TypeError("must be an array"));var e=a.length,f=!1;if(!e)return this.resolve([]);for(var g=new Array(e),h=0,i=-1,j=new this(d);++i<e;)b(a[i],i);return j}function n(a){function b(a){c.resolve(a).then(function(a){f||(f=!0,p.resolve(h,a))},function(a){f||(f=!0,p.reject(h,a))})}var c=this;if("[object Array]"!==Object.prototype.toString.call(a))return this.reject(new TypeError("must be an array"));var e=a.length,f=!1;if(!e)return this.resolve([]);for(var g=-1,h=new this(d);++g<e;)b(a[g]);return h}var o=a("immediate"),p={},q=["REJECTED"],r=["FULFILLED"],s=["PENDING"];b.exports=e,e.prototype["catch"]=function(a){return this.then(null,a)},e.prototype.then=function(a,b){if("function"!=typeof a&&this.state===r||"function"!=typeof b&&this.state===q)return this;var c=new this.constructor(d);if(this.state!==s){var e=this.state===r?a:b;g(c,e,this.outcome)}else this.queue.push(new f(c,a,b));return c},f.prototype.callFulfilled=function(a){p.resolve(this.promise,a)},f.prototype.otherCallFulfilled=function(a){g(this.promise,this.onFulfilled,a)},f.prototype.callRejected=function(a){p.reject(this.promise,a)},f.prototype.otherCallRejected=function(a){g(this.promise,this.onRejected,a)},p.resolve=function(a,b){var c=j(h,b);if("error"===c.status)return p.reject(a,c.value);var d=c.value;if(d)i(a,d);else{a.state=r,a.outcome=b;for(var e=-1,f=a.queue.length;++e<f;)a.queue[e].callFulfilled(b)}return a},p.reject=function(a,b){a.state=q,a.outcome=b;for(var c=-1,d=a.queue.length;++c<d;)a.queue[c].callRejected(b);return a},e.resolve=k,e.reject=l,e.all=m,e.race=n},{immediate:57}],59:[function(a,b,c){"use strict";var d=a("./lib/utils/common").assign,e=a("./lib/deflate"),f=a("./lib/inflate"),g=a("./lib/zlib/constants"),h={};d(h,e,f,g),b.exports=h},{"./lib/deflate":60,"./lib/inflate":61,"./lib/utils/common":62,"./lib/zlib/constants":65}],60:[function(a,b,c){"use strict";function d(a){if(!(this instanceof d))return new d(a);this.options=i.assign({level:s,method:u,chunkSize:16384,windowBits:15,memLevel:8,strategy:t,to:""},a||{});var b=this.options;b.raw&&b.windowBits>0?b.windowBits=-b.windowBits:b.gzip&&b.windowBits>0&&b.windowBits<16&&(b.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new l,this.strm.avail_out=0;var c=h.deflateInit2(this.strm,b.level,b.method,b.windowBits,b.memLevel,b.strategy);if(c!==p)throw new Error(k[c]);if(b.header&&h.deflateSetHeader(this.strm,b.header),b.dictionary){var e;if(e="string"==typeof 
b.dictionary?j.string2buf(b.dictionary):"[object ArrayBuffer]"===m.call(b.dictionary)?new Uint8Array(b.dictionary):b.dictionary,c=h.deflateSetDictionary(this.strm,e),c!==p)throw new Error(k[c]);this._dict_set=!0}}function e(a,b){var c=new d(b);if(c.push(a,!0),c.err)throw c.msg||k[c.err];return c.result}function f(a,b){return b=b||{},b.raw=!0,e(a,b)}function g(a,b){return b=b||{},b.gzip=!0,e(a,b)}var h=a("./zlib/deflate"),i=a("./utils/common"),j=a("./utils/strings"),k=a("./zlib/messages"),l=a("./zlib/zstream"),m=Object.prototype.toString,n=0,o=4,p=0,q=1,r=2,s=-1,t=0,u=8;d.prototype.push=function(a,b){var c,d,e=this.strm,f=this.options.chunkSize;if(this.ended)return!1;d=b===~~b?b:b===!0?o:n,"string"==typeof a?e.input=j.string2buf(a):"[object ArrayBuffer]"===m.call(a)?e.input=new Uint8Array(a):e.input=a,e.next_in=0,e.avail_in=e.input.length;do{if(0===e.avail_out&&(e.output=new i.Buf8(f),e.next_out=0,e.avail_out=f),c=h.deflate(e,d),c!==q&&c!==p)return this.onEnd(c),this.ended=!0,!1;0!==e.avail_out&&(0!==e.avail_in||d!==o&&d!==r)||("string"===this.options.to?this.onData(j.buf2binstring(i.shrinkBuf(e.output,e.next_out))):this.onData(i.shrinkBuf(e.output,e.next_out)))}while((e.avail_in>0||0===e.avail_out)&&c!==q);return d===o?(c=h.deflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===p):d!==r||(this.onEnd(p),e.avail_out=0,!0)},d.prototype.onData=function(a){this.chunks.push(a)},d.prototype.onEnd=function(a){a===p&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=i.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Deflate=d,c.deflate=e,c.deflateRaw=f,c.gzip=g},{"./utils/common":62,"./utils/strings":63,"./zlib/deflate":67,"./zlib/messages":72,"./zlib/zstream":74}],61:[function(a,b,c){"use strict";function d(a){if(!(this instanceof d))return new d(a);this.options=h.assign({chunkSize:16384,windowBits:0,to:""},a||{});var b=this.options;b.raw&&b.windowBits>=0&&b.windowBits<16&&(b.windowBits=-b.windowBits,0===b.windowBits&&(b.windowBits=-15)),!(b.windowBits>=0&&b.windowBits<16)||a&&a.windowBits||(b.windowBits+=32),b.windowBits>15&&b.windowBits<48&&0===(15&b.windowBits)&&(b.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new l,this.strm.avail_out=0;var c=g.inflateInit2(this.strm,b.windowBits);if(c!==j.Z_OK)throw new Error(k[c]);this.header=new m,g.inflateGetHeader(this.strm,this.header)}function e(a,b){var c=new d(b);if(c.push(a,!0),c.err)throw c.msg||k[c.err];return c.result}function f(a,b){return b=b||{},b.raw=!0,e(a,b)}var g=a("./zlib/inflate"),h=a("./utils/common"),i=a("./utils/strings"),j=a("./zlib/constants"),k=a("./zlib/messages"),l=a("./zlib/zstream"),m=a("./zlib/gzheader"),n=Object.prototype.toString;d.prototype.push=function(a,b){var c,d,e,f,k,l,m=this.strm,o=this.options.chunkSize,p=this.options.dictionary,q=!1;if(this.ended)return!1;d=b===~~b?b:b===!0?j.Z_FINISH:j.Z_NO_FLUSH,"string"==typeof a?m.input=i.binstring2buf(a):"[object ArrayBuffer]"===n.call(a)?m.input=new Uint8Array(a):m.input=a,m.next_in=0,m.avail_in=m.input.length;do{if(0===m.avail_out&&(m.output=new h.Buf8(o),m.next_out=0,m.avail_out=o),c=g.inflate(m,j.Z_NO_FLUSH),c===j.Z_NEED_DICT&&p&&(l="string"==typeof p?i.string2buf(p):"[object ArrayBuffer]"===n.call(p)?new Uint8Array(p):p,c=g.inflateSetDictionary(this.strm,l)),c===j.Z_BUF_ERROR&&q===!0&&(c=j.Z_OK,q=!1),c!==j.Z_STREAM_END&&c!==j.Z_OK)return 
this.onEnd(c),this.ended=!0,!1;m.next_out&&(0!==m.avail_out&&c!==j.Z_STREAM_END&&(0!==m.avail_in||d!==j.Z_FINISH&&d!==j.Z_SYNC_FLUSH)||("string"===this.options.to?(e=i.utf8border(m.output,m.next_out),f=m.next_out-e,k=i.buf2string(m.output,e),m.next_out=f,m.avail_out=o-f,f&&h.arraySet(m.output,m.output,e,f,0),this.onData(k)):this.onData(h.shrinkBuf(m.output,m.next_out)))),0===m.avail_in&&0===m.avail_out&&(q=!0)}while((m.avail_in>0||0===m.avail_out)&&c!==j.Z_STREAM_END);return c===j.Z_STREAM_END&&(d=j.Z_FINISH),d===j.Z_FINISH?(c=g.inflateEnd(this.strm),this.onEnd(c),this.ended=!0,c===j.Z_OK):d!==j.Z_SYNC_FLUSH||(this.onEnd(j.Z_OK),m.avail_out=0,!0)},d.prototype.onData=function(a){this.chunks.push(a)},d.prototype.onEnd=function(a){a===j.Z_OK&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=h.flattenChunks(this.chunks)),this.chunks=[],this.err=a,this.msg=this.strm.msg},c.Inflate=d,c.inflate=e,c.inflateRaw=f,c.ungzip=e},{"./utils/common":62,"./utils/strings":63,"./zlib/constants":65,"./zlib/gzheader":68,"./zlib/inflate":70,"./zlib/messages":72,"./zlib/zstream":74}],62:[function(a,b,c){"use strict";var d="undefined"!=typeof Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;c.assign=function(a){for(var b=Array.prototype.slice.call(arguments,1);b.length;){var c=b.shift();if(c){if("object"!=typeof c)throw new TypeError(c+"must be non-object");for(var d in c)c.hasOwnProperty(d)&&(a[d]=c[d])}}return a},c.shrinkBuf=function(a,b){return a.length===b?a:a.subarray?a.subarray(0,b):(a.length=b,a)};var e={arraySet:function(a,b,c,d,e){if(b.subarray&&a.subarray)return void a.set(b.subarray(c,c+d),e);for(var f=0;f<d;f++)a[e+f]=b[c+f]},flattenChunks:function(a){var b,c,d,e,f,g;for(d=0,b=0,c=a.length;b<c;b++)d+=a[b].length;for(g=new Uint8Array(d),e=0,b=0,c=a.length;b<c;b++)f=a[b],g.set(f,e),e+=f.length;return g}},f={arraySet:function(a,b,c,d,e){for(var f=0;f<d;f++)a[e+f]=b[c+f]},flattenChunks:function(a){return[].concat.apply([],a)}};c.setTyped=function(a){a?(c.Buf8=Uint8Array,c.Buf16=Uint16Array,c.Buf32=Int32Array,c.assign(c,e)):(c.Buf8=Array,c.Buf16=Array,c.Buf32=Array,c.assign(c,f))},c.setTyped(d)},{}],63:[function(a,b,c){"use strict";function d(a,b){if(b<65537&&(a.subarray&&g||!a.subarray&&f))return String.fromCharCode.apply(null,e.shrinkBuf(a,b));for(var c="",d=0;d<b;d++)c+=String.fromCharCode(a[d]);return c}var e=a("./common"),f=!0,g=!0;try{String.fromCharCode.apply(null,[0])}catch(h){f=!1}try{String.fromCharCode.apply(null,new Uint8Array(1))}catch(h){g=!1}for(var i=new e.Buf8(256),j=0;j<256;j++)i[j]=j>=252?6:j>=248?5:j>=240?4:j>=224?3:j>=192?2:1;i[254]=i[254]=1,c.string2buf=function(a){var b,c,d,f,g,h=a.length,i=0;for(f=0;f<h;f++)c=a.charCodeAt(f),55296===(64512&c)&&f+1<h&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),i+=c<128?1:c<2048?2:c<65536?3:4;for(b=new e.Buf8(i),g=0,f=0;g<i;f++)c=a.charCodeAt(f),55296===(64512&c)&&f+1<h&&(d=a.charCodeAt(f+1),56320===(64512&d)&&(c=65536+(c-55296<<10)+(d-56320),f++)),c<128?b[g++]=c:c<2048?(b[g++]=192|c>>>6,b[g++]=128|63&c):c<65536?(b[g++]=224|c>>>12,b[g++]=128|c>>>6&63,b[g++]=128|63&c):(b[g++]=240|c>>>18,b[g++]=128|c>>>12&63,b[g++]=128|c>>>6&63,b[g++]=128|63&c);return b},c.buf2binstring=function(a){return d(a,a.length)},c.binstring2buf=function(a){for(var b=new e.Buf8(a.length),c=0,d=b.length;c<d;c++)b[c]=a.charCodeAt(c);return b},c.buf2string=function(a,b){var c,e,f,g,h=b||a.length,j=new Array(2*h);for(e=0,c=0;c<h;)if(f=a[c++],f<128)j[e++]=f;else 
if(g=i[f],g>4)j[e++]=65533,c+=g-1;else{for(f&=2===g?31:3===g?15:7;g>1&&c<h;)f=f<<6|63&a[c++],g--;g>1?j[e++]=65533:f<65536?j[e++]=f:(f-=65536,j[e++]=55296|f>>10&1023,j[e++]=56320|1023&f)}return d(j,e)},c.utf8border=function(a,b){var c;for(b=b||a.length,b>a.length&&(b=a.length),c=b-1;c>=0&&128===(192&a[c]);)c--;return c<0?b:0===c?b:c+i[a[c]]>b?c:b}},{"./common":62}],64:[function(a,b,c){"use strict";function d(a,b,c,d){for(var e=65535&a|0,f=a>>>16&65535|0,g=0;0!==c;){g=c>2e3?2e3:c,c-=g;do e=e+b[d++]|0,f=f+e|0;while(--g);e%=65521,f%=65521}return e|f<<16|0;
}b.exports=d},{}],65:[function(a,b,c){"use strict";b.exports={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8}},{}],66:[function(a,b,c){"use strict";function d(){for(var a,b=[],c=0;c<256;c++){a=c;for(var d=0;d<8;d++)a=1&a?3988292384^a>>>1:a>>>1;b[c]=a}return b}function e(a,b,c,d){var e=f,g=d+c;a^=-1;for(var h=d;h<g;h++)a=a>>>8^e[255&(a^b[h])];return a^-1}var f=d();b.exports=e},{}],67:[function(a,b,c){"use strict";function d(a,b){return a.msg=I[b],b}function e(a){return(a<<1)-(a>4?9:0)}function f(a){for(var b=a.length;--b>=0;)a[b]=0}function g(a){var b=a.state,c=b.pending;c>a.avail_out&&(c=a.avail_out),0!==c&&(E.arraySet(a.output,b.pending_buf,b.pending_out,c,a.next_out),a.next_out+=c,b.pending_out+=c,a.total_out+=c,a.avail_out-=c,b.pending-=c,0===b.pending&&(b.pending_out=0))}function h(a,b){F._tr_flush_block(a,a.block_start>=0?a.block_start:-1,a.strstart-a.block_start,b),a.block_start=a.strstart,g(a.strm)}function i(a,b){a.pending_buf[a.pending++]=b}function j(a,b){a.pending_buf[a.pending++]=b>>>8&255,a.pending_buf[a.pending++]=255&b}function k(a,b,c,d){var e=a.avail_in;return e>d&&(e=d),0===e?0:(a.avail_in-=e,E.arraySet(b,a.input,a.next_in,e,c),1===a.state.wrap?a.adler=G(a.adler,b,e,c):2===a.state.wrap&&(a.adler=H(a.adler,b,e,c)),a.next_in+=e,a.total_in+=e,e)}function l(a,b){var c,d,e=a.max_chain_length,f=a.strstart,g=a.prev_length,h=a.nice_match,i=a.strstart>a.w_size-la?a.strstart-(a.w_size-la):0,j=a.window,k=a.w_mask,l=a.prev,m=a.strstart+ka,n=j[f+g-1],o=j[f+g];a.prev_length>=a.good_match&&(e>>=2),h>a.lookahead&&(h=a.lookahead);do if(c=b,j[c+g]===o&&j[c+g-1]===n&&j[c]===j[f]&&j[++c]===j[f+1]){f+=2,c++;do;while(j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&j[++f]===j[++c]&&f<m);if(d=ka-(m-f),f=m-ka,d>g){if(a.match_start=b,g=d,d>=h)break;n=j[f+g-1],o=j[f+g]}}while((b=l[b&k])>i&&0!==--e);return g<=a.lookahead?g:a.lookahead}function m(a){var b,c,d,e,f,g=a.w_size;do{if(e=a.window_size-a.lookahead-a.strstart,a.strstart>=g+(g-la)){E.arraySet(a.window,a.window,g,g,0),a.match_start-=g,a.strstart-=g,a.block_start-=g,c=a.hash_size,b=c;do d=a.head[--b],a.head[b]=d>=g?d-g:0;while(--c);c=g,b=c;do d=a.prev[--b],a.prev[b]=d>=g?d-g:0;while(--c);e+=g}if(0===a.strm.avail_in)break;if(c=k(a.strm,a.window,a.strstart+a.lookahead,e),a.lookahead+=c,a.lookahead+a.insert>=ja)for(f=a.strstart-a.insert,a.ins_h=a.window[f],a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+1])&a.hash_mask;a.insert&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[f+ja-1])&a.hash_mask,a.prev[f&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=f,f++,a.insert--,!(a.lookahead+a.insert<ja)););}while(a.lookahead<la&&0!==a.strm.avail_in)}function n(a,b){var c=65535;for(c>a.pending_buf_size-5&&(c=a.pending_buf_size-5);;){if(a.lookahead<=1){if(m(a),0===a.lookahead&&b===J)return ua;if(0===a.lookahead)break}a.strstart+=a.lookahead,a.lookahead=0;var d=a.block_start+c;if((0===a.strstart||a.strstart>=d)&&(a.lookahead=a.strstart-d,a.strstart=d,h(a,!1),0===a.strm.avail_out))return ua;if(a.strstart-a.block_start>=a.w_size-la&&(h(a,!1),0===a.strm.avail_out))return ua}return 
a.insert=0,b===M?(h(a,!0),0===a.strm.avail_out?wa:xa):a.strstart>a.block_start&&(h(a,!1),0===a.strm.avail_out)?ua:ua}function o(a,b){for(var c,d;;){if(a.lookahead<la){if(m(a),a.lookahead<la&&b===J)return ua;if(0===a.lookahead)break}if(c=0,a.lookahead>=ja&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+ja-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),0!==c&&a.strstart-c<=a.w_size-la&&(a.match_length=l(a,c)),a.match_length>=ja)if(d=F._tr_tally(a,a.strstart-a.match_start,a.match_length-ja),a.lookahead-=a.match_length,a.match_length<=a.max_lazy_match&&a.lookahead>=ja){a.match_length--;do a.strstart++,a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+ja-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart;while(0!==--a.match_length);a.strstart++}else a.strstart+=a.match_length,a.match_length=0,a.ins_h=a.window[a.strstart],a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+1])&a.hash_mask;else d=F._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++;if(d&&(h(a,!1),0===a.strm.avail_out))return ua}return a.insert=a.strstart<ja-1?a.strstart:ja-1,b===M?(h(a,!0),0===a.strm.avail_out?wa:xa):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?ua:va}function p(a,b){for(var c,d,e;;){if(a.lookahead<la){if(m(a),a.lookahead<la&&b===J)return ua;if(0===a.lookahead)break}if(c=0,a.lookahead>=ja&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+ja-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart),a.prev_length=a.match_length,a.prev_match=a.match_start,a.match_length=ja-1,0!==c&&a.prev_length<a.max_lazy_match&&a.strstart-c<=a.w_size-la&&(a.match_length=l(a,c),a.match_length<=5&&(a.strategy===U||a.match_length===ja&&a.strstart-a.match_start>4096)&&(a.match_length=ja-1)),a.prev_length>=ja&&a.match_length<=a.prev_length){e=a.strstart+a.lookahead-ja,d=F._tr_tally(a,a.strstart-1-a.prev_match,a.prev_length-ja),a.lookahead-=a.prev_length-1,a.prev_length-=2;do++a.strstart<=e&&(a.ins_h=(a.ins_h<<a.hash_shift^a.window[a.strstart+ja-1])&a.hash_mask,c=a.prev[a.strstart&a.w_mask]=a.head[a.ins_h],a.head[a.ins_h]=a.strstart);while(0!==--a.prev_length);if(a.match_available=0,a.match_length=ja-1,a.strstart++,d&&(h(a,!1),0===a.strm.avail_out))return ua}else if(a.match_available){if(d=F._tr_tally(a,0,a.window[a.strstart-1]),d&&h(a,!1),a.strstart++,a.lookahead--,0===a.strm.avail_out)return ua}else a.match_available=1,a.strstart++,a.lookahead--}return a.match_available&&(d=F._tr_tally(a,0,a.window[a.strstart-1]),a.match_available=0),a.insert=a.strstart<ja-1?a.strstart:ja-1,b===M?(h(a,!0),0===a.strm.avail_out?wa:xa):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?ua:va}function q(a,b){for(var c,d,e,f,g=a.window;;){if(a.lookahead<=ka){if(m(a),a.lookahead<=ka&&b===J)return ua;if(0===a.lookahead)break}if(a.match_length=0,a.lookahead>=ja&&a.strstart>0&&(e=a.strstart-1,d=g[e],d===g[++e]&&d===g[++e]&&d===g[++e])){f=a.strstart+ka;do;while(d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&d===g[++e]&&e<f);a.match_length=ka-(f-e),a.match_length>a.lookahead&&(a.match_length=a.lookahead)}if(a.match_length>=ja?(c=F._tr_tally(a,1,a.match_length-ja),a.lookahead-=a.match_length,a.strstart+=a.match_length,a.match_length=0):(c=F._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++),c&&(h(a,!1),0===a.strm.avail_out))return ua}return a.insert=0,b===M?(h(a,!0),0===a.strm.avail_out?wa:xa):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?ua:va}function r(a,b){for(var 
c;;){if(0===a.lookahead&&(m(a),0===a.lookahead)){if(b===J)return ua;break}if(a.match_length=0,c=F._tr_tally(a,0,a.window[a.strstart]),a.lookahead--,a.strstart++,c&&(h(a,!1),0===a.strm.avail_out))return ua}return a.insert=0,b===M?(h(a,!0),0===a.strm.avail_out?wa:xa):a.last_lit&&(h(a,!1),0===a.strm.avail_out)?ua:va}function s(a,b,c,d,e){this.good_length=a,this.max_lazy=b,this.nice_length=c,this.max_chain=d,this.func=e}function t(a){a.window_size=2*a.w_size,f(a.head),a.max_lazy_match=D[a.level].max_lazy,a.good_match=D[a.level].good_length,a.nice_match=D[a.level].nice_length,a.max_chain_length=D[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=ja-1,a.match_available=0,a.ins_h=0}function u(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=$,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new E.Buf16(2*ha),this.dyn_dtree=new E.Buf16(2*(2*fa+1)),this.bl_tree=new E.Buf16(2*(2*ga+1)),f(this.dyn_ltree),f(this.dyn_dtree),f(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new E.Buf16(ia+1),this.heap=new E.Buf16(2*ea+1),f(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new E.Buf16(2*ea+1),f(this.depth),this.l_buf=0,this.lit_bufsize=0,this.last_lit=0,this.d_buf=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}function v(a){var b;return a&&a.state?(a.total_in=a.total_out=0,a.data_type=Z,b=a.state,b.pending=0,b.pending_out=0,b.wrap<0&&(b.wrap=-b.wrap),b.status=b.wrap?na:sa,a.adler=2===b.wrap?0:1,b.last_flush=J,F._tr_init(b),O):d(a,Q)}function w(a){var b=v(a);return b===O&&t(a.state),b}function x(a,b){return a&&a.state?2!==a.state.wrap?Q:(a.state.gzhead=b,O):Q}function y(a,b,c,e,f,g){if(!a)return Q;var h=1;if(b===T&&(b=6),e<0?(h=0,e=-e):e>15&&(h=2,e-=16),f<1||f>_||c!==$||e<8||e>15||b<0||b>9||g<0||g>X)return d(a,Q);8===e&&(e=9);var i=new u;return a.state=i,i.strm=a,i.wrap=h,i.gzhead=null,i.w_bits=e,i.w_size=1<<i.w_bits,i.w_mask=i.w_size-1,i.hash_bits=f+7,i.hash_size=1<<i.hash_bits,i.hash_mask=i.hash_size-1,i.hash_shift=~~((i.hash_bits+ja-1)/ja),i.window=new E.Buf8(2*i.w_size),i.head=new E.Buf16(i.hash_size),i.prev=new E.Buf16(i.w_size),i.lit_bufsize=1<<f+6,i.pending_buf_size=4*i.lit_bufsize,i.pending_buf=new E.Buf8(i.pending_buf_size),i.d_buf=1*i.lit_bufsize,i.l_buf=3*i.lit_bufsize,i.level=b,i.strategy=g,i.method=c,w(a)}function z(a,b){return y(a,b,$,aa,ba,Y)}function A(a,b){var c,h,k,l;if(!a||!a.state||b>N||b<0)return a?d(a,Q):Q;if(h=a.state,!a.output||!a.input&&0!==a.avail_in||h.status===ta&&b!==M)return 
d(a,0===a.avail_out?S:Q);if(h.strm=a,c=h.last_flush,h.last_flush=b,h.status===na)if(2===h.wrap)a.adler=0,i(h,31),i(h,139),i(h,8),h.gzhead?(i(h,(h.gzhead.text?1:0)+(h.gzhead.hcrc?2:0)+(h.gzhead.extra?4:0)+(h.gzhead.name?8:0)+(h.gzhead.comment?16:0)),i(h,255&h.gzhead.time),i(h,h.gzhead.time>>8&255),i(h,h.gzhead.time>>16&255),i(h,h.gzhead.time>>24&255),i(h,9===h.level?2:h.strategy>=V||h.level<2?4:0),i(h,255&h.gzhead.os),h.gzhead.extra&&h.gzhead.extra.length&&(i(h,255&h.gzhead.extra.length),i(h,h.gzhead.extra.length>>8&255)),h.gzhead.hcrc&&(a.adler=H(a.adler,h.pending_buf,h.pending,0)),h.gzindex=0,h.status=oa):(i(h,0),i(h,0),i(h,0),i(h,0),i(h,0),i(h,9===h.level?2:h.strategy>=V||h.level<2?4:0),i(h,ya),h.status=sa);else{var m=$+(h.w_bits-8<<4)<<8,n=-1;n=h.strategy>=V||h.level<2?0:h.level<6?1:6===h.level?2:3,m|=n<<6,0!==h.strstart&&(m|=ma),m+=31-m%31,h.status=sa,j(h,m),0!==h.strstart&&(j(h,a.adler>>>16),j(h,65535&a.adler)),a.adler=1}if(h.status===oa)if(h.gzhead.extra){for(k=h.pending;h.gzindex<(65535&h.gzhead.extra.length)&&(h.pending!==h.pending_buf_size||(h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending!==h.pending_buf_size));)i(h,255&h.gzhead.extra[h.gzindex]),h.gzindex++;h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),h.gzindex===h.gzhead.extra.length&&(h.gzindex=0,h.status=pa)}else h.status=pa;if(h.status===pa)if(h.gzhead.name){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.name.length?255&h.gzhead.name.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.gzindex=0,h.status=qa)}else h.status=qa;if(h.status===qa)if(h.gzhead.comment){k=h.pending;do{if(h.pending===h.pending_buf_size&&(h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),g(a),k=h.pending,h.pending===h.pending_buf_size)){l=1;break}l=h.gzindex<h.gzhead.comment.length?255&h.gzhead.comment.charCodeAt(h.gzindex++):0,i(h,l)}while(0!==l);h.gzhead.hcrc&&h.pending>k&&(a.adler=H(a.adler,h.pending_buf,h.pending-k,k)),0===l&&(h.status=ra)}else h.status=ra;if(h.status===ra&&(h.gzhead.hcrc?(h.pending+2>h.pending_buf_size&&g(a),h.pending+2<=h.pending_buf_size&&(i(h,255&a.adler),i(h,a.adler>>8&255),a.adler=0,h.status=sa)):h.status=sa),0!==h.pending){if(g(a),0===a.avail_out)return h.last_flush=-1,O}else if(0===a.avail_in&&e(b)<=e(c)&&b!==M)return d(a,S);if(h.status===ta&&0!==a.avail_in)return d(a,S);if(0!==a.avail_in||0!==h.lookahead||b!==J&&h.status!==ta){var o=h.strategy===V?r(h,b):h.strategy===W?q(h,b):D[h.level].func(h,b);if(o!==wa&&o!==xa||(h.status=ta),o===ua||o===wa)return 0===a.avail_out&&(h.last_flush=-1),O;if(o===va&&(b===K?F._tr_align(h):b!==N&&(F._tr_stored_block(h,0,0,!1),b===L&&(f(h.head),0===h.lookahead&&(h.strstart=0,h.block_start=0,h.insert=0))),g(a),0===a.avail_out))return h.last_flush=-1,O}return b!==M?O:h.wrap<=0?P:(2===h.wrap?(i(h,255&a.adler),i(h,a.adler>>8&255),i(h,a.adler>>16&255),i(h,a.adler>>24&255),i(h,255&a.total_in),i(h,a.total_in>>8&255),i(h,a.total_in>>16&255),i(h,a.total_in>>24&255)):(j(h,a.adler>>>16),j(h,65535&a.adler)),g(a),h.wrap>0&&(h.wrap=-h.wrap),0!==h.pending?O:P)}function B(a){var b;return a&&a.state?(b=a.state.status,b!==na&&b!==oa&&b!==pa&&b!==qa&&b!==ra&&b!==sa&&b!==ta?d(a,Q):(a.state=null,b===sa?d(a,R):O)):Q}function C(a,b){var 
c,d,e,g,h,i,j,k,l=b.length;if(!a||!a.state)return Q;if(c=a.state,g=c.wrap,2===g||1===g&&c.status!==na||c.lookahead)return Q;for(1===g&&(a.adler=G(a.adler,b,l,0)),c.wrap=0,l>=c.w_size&&(0===g&&(f(c.head),c.strstart=0,c.block_start=0,c.insert=0),k=new E.Buf8(c.w_size),E.arraySet(k,b,l-c.w_size,c.w_size,0),b=k,l=c.w_size),h=a.avail_in,i=a.next_in,j=a.input,a.avail_in=l,a.next_in=0,a.input=b,m(c);c.lookahead>=ja;){d=c.strstart,e=c.lookahead-(ja-1);do c.ins_h=(c.ins_h<<c.hash_shift^c.window[d+ja-1])&c.hash_mask,c.prev[d&c.w_mask]=c.head[c.ins_h],c.head[c.ins_h]=d,d++;while(--e);c.strstart=d,c.lookahead=ja-1,m(c)}return c.strstart+=c.lookahead,c.block_start=c.strstart,c.insert=c.lookahead,c.lookahead=0,c.match_length=c.prev_length=ja-1,c.match_available=0,a.next_in=i,a.input=j,a.avail_in=h,c.wrap=g,O}var D,E=a("../utils/common"),F=a("./trees"),G=a("./adler32"),H=a("./crc32"),I=a("./messages"),J=0,K=1,L=3,M=4,N=5,O=0,P=1,Q=-2,R=-3,S=-5,T=-1,U=1,V=2,W=3,X=4,Y=0,Z=2,$=8,_=9,aa=15,ba=8,ca=29,da=256,ea=da+1+ca,fa=30,ga=19,ha=2*ea+1,ia=15,ja=3,ka=258,la=ka+ja+1,ma=32,na=42,oa=69,pa=73,qa=91,ra=103,sa=113,ta=666,ua=1,va=2,wa=3,xa=4,ya=3;D=[new s(0,0,0,0,n),new s(4,4,8,4,o),new s(4,5,16,8,o),new s(4,6,32,32,o),new s(4,4,16,16,p),new s(8,16,32,32,p),new s(8,16,128,128,p),new s(8,32,128,256,p),new s(32,128,258,1024,p),new s(32,258,258,4096,p)],c.deflateInit=z,c.deflateInit2=y,c.deflateReset=w,c.deflateResetKeep=v,c.deflateSetHeader=x,c.deflate=A,c.deflateEnd=B,c.deflateSetDictionary=C,c.deflateInfo="pako deflate (from Nodeca project)"},{"../utils/common":62,"./adler32":64,"./crc32":66,"./messages":72,"./trees":73}],68:[function(a,b,c){"use strict";function d(){this.text=0,this.time=0,this.xflags=0,this.os=0,this.extra=null,this.extra_len=0,this.name="",this.comment="",this.hcrc=0,this.done=!1}b.exports=d},{}],69:[function(a,b,c){"use strict";var d=30,e=12;b.exports=function(a,b){var c,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z,A,B,C;c=a.state,f=a.next_in,B=a.input,g=f+(a.avail_in-5),h=a.next_out,C=a.output,i=h-(b-a.avail_out),j=h+(a.avail_out-257),k=c.dmax,l=c.wsize,m=c.whave,n=c.wnext,o=c.window,p=c.hold,q=c.bits,r=c.lencode,s=c.distcode,t=(1<<c.lenbits)-1,u=(1<<c.distbits)-1;a:do{q<15&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=r[p&t];b:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,0===w)C[h++]=65535&v;else{if(!(16&w)){if(0===(64&w)){v=r[(65535&v)+(p&(1<<w)-1)];continue b}if(32&w){c.mode=e;break a}a.msg="invalid literal/length code",c.mode=d;break a}x=65535&v,w&=15,w&&(q<w&&(p+=B[f++]<<q,q+=8),x+=p&(1<<w)-1,p>>>=w,q-=w),q<15&&(p+=B[f++]<<q,q+=8,p+=B[f++]<<q,q+=8),v=s[p&u];c:for(;;){if(w=v>>>24,p>>>=w,q-=w,w=v>>>16&255,!(16&w)){if(0===(64&w)){v=s[(65535&v)+(p&(1<<w)-1)];continue c}a.msg="invalid distance code",c.mode=d;break a}if(y=65535&v,w&=15,q<w&&(p+=B[f++]<<q,q+=8,q<w&&(p+=B[f++]<<q,q+=8)),y+=p&(1<<w)-1,y>k){a.msg="invalid distance too far back",c.mode=d;break a}if(p>>>=w,q-=w,w=h-i,y>w){if(w=y-w,w>m&&c.sane){a.msg="invalid distance too far back",c.mode=d;break a}if(z=0,A=o,0===n){if(z+=l-w,w<x){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}else if(n<w){if(z+=l+n-w,w-=n,w<x){x-=w;do C[h++]=o[z++];while(--w);if(z=0,n<x){w=n,x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}}}else if(z+=n-w,w<x){x-=w;do C[h++]=o[z++];while(--w);z=h-y,A=C}for(;x>2;)C[h++]=A[z++],C[h++]=A[z++],C[h++]=A[z++],x-=3;x&&(C[h++]=A[z++],x>1&&(C[h++]=A[z++]))}else{z=h-y;do 
C[h++]=C[z++],C[h++]=C[z++],C[h++]=C[z++],x-=3;while(x>2);x&&(C[h++]=C[z++],x>1&&(C[h++]=C[z++]))}break}}break}}while(f<g&&h<j);x=q>>3,f-=x,q-=x<<3,p&=(1<<q)-1,a.next_in=f,a.next_out=h,a.avail_in=f<g?5+(g-f):5-(f-g),a.avail_out=h<j?257+(j-h):257-(h-j),c.hold=p,c.bits=q}},{}],70:[function(a,b,c){"use strict";function d(a){return(a>>>24&255)+(a>>>8&65280)+((65280&a)<<8)+((255&a)<<24)}function e(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new s.Buf16(320),this.work=new s.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function f(a){var b;return a&&a.state?(b=a.state,a.total_in=a.total_out=b.total=0,a.msg="",b.wrap&&(a.adler=1&b.wrap),b.mode=L,b.last=0,b.havedict=0,b.dmax=32768,b.head=null,b.hold=0,b.bits=0,b.lencode=b.lendyn=new s.Buf32(pa),b.distcode=b.distdyn=new s.Buf32(qa),b.sane=1,b.back=-1,D):G}function g(a){var b;return a&&a.state?(b=a.state,b.wsize=0,b.whave=0,b.wnext=0,f(a)):G}function h(a,b){var c,d;return a&&a.state?(d=a.state,b<0?(c=0,b=-b):(c=(b>>4)+1,b<48&&(b&=15)),b&&(b<8||b>15)?G:(null!==d.window&&d.wbits!==b&&(d.window=null),d.wrap=c,d.wbits=b,g(a))):G}function i(a,b){var c,d;return a?(d=new e,a.state=d,d.window=null,c=h(a,b),c!==D&&(a.state=null),c):G}function j(a){return i(a,sa)}function k(a){if(ta){var b;for(q=new s.Buf32(512),r=new s.Buf32(32),b=0;b<144;)a.lens[b++]=8;for(;b<256;)a.lens[b++]=9;for(;b<280;)a.lens[b++]=7;for(;b<288;)a.lens[b++]=8;for(w(y,a.lens,0,288,q,0,a.work,{bits:9}),b=0;b<32;)a.lens[b++]=5;w(z,a.lens,0,32,r,0,a.work,{bits:5}),ta=!1}a.lencode=q,a.lenbits=9,a.distcode=r,a.distbits=5}function l(a,b,c,d){var e,f=a.state;return null===f.window&&(f.wsize=1<<f.wbits,f.wnext=0,f.whave=0,f.window=new s.Buf8(f.wsize)),d>=f.wsize?(s.arraySet(f.window,b,c-f.wsize,f.wsize,0),f.wnext=0,f.whave=f.wsize):(e=f.wsize-f.wnext,e>d&&(e=d),s.arraySet(f.window,b,c-d,e,f.wnext),d-=e,d?(s.arraySet(f.window,b,c-d,d,0),f.wnext=d,f.whave=f.wsize):(f.wnext+=e,f.wnext===f.wsize&&(f.wnext=0),f.whave<f.wsize&&(f.whave+=e))),0}function m(a,b){var c,e,f,g,h,i,j,m,n,o,p,q,r,pa,qa,ra,sa,ta,ua,va,wa,xa,ya,za,Aa=0,Ba=new s.Buf8(4),Ca=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15];if(!a||!a.state||!a.output||!a.input&&0!==a.avail_in)return G;c=a.state,c.mode===W&&(c.mode=X),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,o=i,p=j,xa=D;a:for(;;)switch(c.mode){case L:if(0===c.wrap){c.mode=X;break}for(;n<16;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(2&c.wrap&&35615===m){c.check=0,Ba[0]=255&m,Ba[1]=m>>>8&255,c.check=u(c.check,Ba,2,0),m=0,n=0,c.mode=M;break}if(c.flags=0,c.head&&(c.head.done=!1),!(1&c.wrap)||(((255&m)<<8)+(m>>8))%31){a.msg="incorrect header check",c.mode=ma;break}if((15&m)!==K){a.msg="unknown compression method",c.mode=ma;break}if(m>>>=4,n-=4,wa=(15&m)+8,0===c.wbits)c.wbits=wa;else if(wa>c.wbits){a.msg="invalid window size",c.mode=ma;break}c.dmax=1<<wa,a.adler=c.check=1,c.mode=512&m?U:W,m=0,n=0;break;case M:for(;n<16;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.flags=m,(255&c.flags)!==K){a.msg="unknown compression method",c.mode=ma;break}if(57344&c.flags){a.msg="unknown header flags 
set",c.mode=ma;break}c.head&&(c.head.text=m>>8&1),512&c.flags&&(Ba[0]=255&m,Ba[1]=m>>>8&255,c.check=u(c.check,Ba,2,0)),m=0,n=0,c.mode=N;case N:for(;n<32;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.time=m),512&c.flags&&(Ba[0]=255&m,Ba[1]=m>>>8&255,Ba[2]=m>>>16&255,Ba[3]=m>>>24&255,c.check=u(c.check,Ba,4,0)),m=0,n=0,c.mode=O;case O:for(;n<16;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.head&&(c.head.xflags=255&m,c.head.os=m>>8),512&c.flags&&(Ba[0]=255&m,Ba[1]=m>>>8&255,c.check=u(c.check,Ba,2,0)),m=0,n=0,c.mode=P;case P:if(1024&c.flags){for(;n<16;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length=m,c.head&&(c.head.extra_len=m),512&c.flags&&(Ba[0]=255&m,Ba[1]=m>>>8&255,c.check=u(c.check,Ba,2,0)),m=0,n=0}else c.head&&(c.head.extra=null);c.mode=Q;case Q:if(1024&c.flags&&(q=c.length,q>i&&(q=i),q&&(c.head&&(wa=c.head.extra_len-c.length,c.head.extra||(c.head.extra=new Array(c.head.extra_len)),s.arraySet(c.head.extra,e,g,q,wa)),512&c.flags&&(c.check=u(c.check,e,q,g)),i-=q,g+=q,c.length-=q),c.length))break a;c.length=0,c.mode=R;case R:if(2048&c.flags){if(0===i)break a;q=0;do wa=e[g+q++],c.head&&wa&&c.length<65536&&(c.head.name+=String.fromCharCode(wa));while(wa&&q<i);if(512&c.flags&&(c.check=u(c.check,e,q,g)),i-=q,g+=q,wa)break a}else c.head&&(c.head.name=null);c.length=0,c.mode=S;case S:if(4096&c.flags){if(0===i)break a;q=0;do wa=e[g+q++],c.head&&wa&&c.length<65536&&(c.head.comment+=String.fromCharCode(wa));while(wa&&q<i);if(512&c.flags&&(c.check=u(c.check,e,q,g)),i-=q,g+=q,wa)break a}else c.head&&(c.head.comment=null);c.mode=T;case T:if(512&c.flags){for(;n<16;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(65535&c.check)){a.msg="header crc mismatch",c.mode=ma;break}m=0,n=0}c.head&&(c.head.hcrc=c.flags>>9&1,c.head.done=!0),a.adler=c.check=0,c.mode=W;break;case U:for(;n<32;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}a.adler=c.check=d(m),m=0,n=0,c.mode=V;case V:if(0===c.havedict)return a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,F;a.adler=c.check=1,c.mode=W;case W:if(b===B||b===C)break a;case X:if(c.last){m>>>=7&n,n-=7&n,c.mode=ja;break}for(;n<3;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}switch(c.last=1&m,m>>>=1,n-=1,3&m){case 0:c.mode=Y;break;case 1:if(k(c),c.mode=ca,b===C){m>>>=2,n-=2;break a}break;case 2:c.mode=_;break;case 3:a.msg="invalid block type",c.mode=ma}m>>>=2,n-=2;break;case Y:for(m>>>=7&n,n-=7&n;n<32;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if((65535&m)!==(m>>>16^65535)){a.msg="invalid stored block lengths",c.mode=ma;break}if(c.length=65535&m,m=0,n=0,c.mode=Z,b===C)break a;case Z:c.mode=$;case $:if(q=c.length){if(q>i&&(q=i),q>j&&(q=j),0===q)break a;s.arraySet(f,e,g,q,h),i-=q,g+=q,j-=q,h+=q,c.length-=q;break}c.mode=W;break;case _:for(;n<14;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(c.nlen=(31&m)+257,m>>>=5,n-=5,c.ndist=(31&m)+1,m>>>=5,n-=5,c.ncode=(15&m)+4,m>>>=4,n-=4,c.nlen>286||c.ndist>30){a.msg="too many length or distance symbols",c.mode=ma;break}c.have=0,c.mode=aa;case aa:for(;c.have<c.ncode;){for(;n<3;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.lens[Ca[c.have++]]=7&m,m>>>=3,n-=3}for(;c.have<19;)c.lens[Ca[c.have++]]=0;if(c.lencode=c.lendyn,c.lenbits=7,ya={bits:c.lenbits},xa=w(x,c.lens,0,19,c.lencode,0,c.work,ya),c.lenbits=ya.bits,xa){a.msg="invalid code lengths set",c.mode=ma;break}c.have=0,c.mode=ba;case ba:for(;c.have<c.nlen+c.ndist;){for(;Aa=c.lencode[m&(1<<c.lenbits)-1],qa=Aa>>>24,ra=Aa>>>16&255,sa=65535&Aa,!(qa<=n);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(sa<16)m>>>=qa,n-=qa,c.lens[c.have++]=sa;else{if(16===sa){for(za=qa+2;n<za;){if(0===i)break 
a;i--,m+=e[g++]<<n,n+=8}if(m>>>=qa,n-=qa,0===c.have){a.msg="invalid bit length repeat",c.mode=ma;break}wa=c.lens[c.have-1],q=3+(3&m),m>>>=2,n-=2}else if(17===sa){for(za=qa+3;n<za;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qa,n-=qa,wa=0,q=3+(7&m),m>>>=3,n-=3}else{for(za=qa+7;n<za;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=qa,n-=qa,wa=0,q=11+(127&m),m>>>=7,n-=7}if(c.have+q>c.nlen+c.ndist){a.msg="invalid bit length repeat",c.mode=ma;break}for(;q--;)c.lens[c.have++]=wa}}if(c.mode===ma)break;if(0===c.lens[256]){a.msg="invalid code -- missing end-of-block",c.mode=ma;break}if(c.lenbits=9,ya={bits:c.lenbits},xa=w(y,c.lens,0,c.nlen,c.lencode,0,c.work,ya),c.lenbits=ya.bits,xa){a.msg="invalid literal/lengths set",c.mode=ma;break}if(c.distbits=6,c.distcode=c.distdyn,ya={bits:c.distbits},xa=w(z,c.lens,c.nlen,c.ndist,c.distcode,0,c.work,ya),c.distbits=ya.bits,xa){a.msg="invalid distances set",c.mode=ma;break}if(c.mode=ca,b===C)break a;case ca:c.mode=da;case da:if(i>=6&&j>=258){a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,v(a,p),h=a.next_out,f=a.output,j=a.avail_out,g=a.next_in,e=a.input,i=a.avail_in,m=c.hold,n=c.bits,c.mode===W&&(c.back=-1);break}for(c.back=0;Aa=c.lencode[m&(1<<c.lenbits)-1],qa=Aa>>>24,ra=Aa>>>16&255,sa=65535&Aa,!(qa<=n);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(ra&&0===(240&ra)){for(ta=qa,ua=ra,va=sa;Aa=c.lencode[va+((m&(1<<ta+ua)-1)>>ta)],qa=Aa>>>24,ra=Aa>>>16&255,sa=65535&Aa,!(ta+qa<=n);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=ta,n-=ta,c.back+=ta}if(m>>>=qa,n-=qa,c.back+=qa,c.length=sa,0===ra){c.mode=ia;break}if(32&ra){c.back=-1,c.mode=W;break}if(64&ra){a.msg="invalid literal/length code",c.mode=ma;break}c.extra=15&ra,c.mode=ea;case ea:if(c.extra){for(za=c.extra;n<za;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.length+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}c.was=c.length,c.mode=fa;case fa:for(;Aa=c.distcode[m&(1<<c.distbits)-1],qa=Aa>>>24,ra=Aa>>>16&255,sa=65535&Aa,!(qa<=n);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(0===(240&ra)){for(ta=qa,ua=ra,va=sa;Aa=c.distcode[va+((m&(1<<ta+ua)-1)>>ta)],qa=Aa>>>24,ra=Aa>>>16&255,sa=65535&Aa,!(ta+qa<=n);){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}m>>>=ta,n-=ta,c.back+=ta}if(m>>>=qa,n-=qa,c.back+=qa,64&ra){a.msg="invalid distance code",c.mode=ma;break}c.offset=sa,c.extra=15&ra,c.mode=ga;case ga:if(c.extra){for(za=c.extra;n<za;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}c.offset+=m&(1<<c.extra)-1,m>>>=c.extra,n-=c.extra,c.back+=c.extra}if(c.offset>c.dmax){a.msg="invalid distance too far back",c.mode=ma;break}c.mode=ha;case ha:if(0===j)break a;if(q=p-j,c.offset>q){if(q=c.offset-q,q>c.whave&&c.sane){a.msg="invalid distance too far back",c.mode=ma;break}q>c.wnext?(q-=c.wnext,r=c.wsize-q):r=c.wnext-q,q>c.length&&(q=c.length),pa=c.window}else pa=f,r=h-c.offset,q=c.length;q>j&&(q=j),j-=q,c.length-=q;do f[h++]=pa[r++];while(--q);0===c.length&&(c.mode=da);break;case ia:if(0===j)break a;f[h++]=c.length,j--,c.mode=da;break;case ja:if(c.wrap){for(;n<32;){if(0===i)break a;i--,m|=e[g++]<<n,n+=8}if(p-=j,a.total_out+=p,c.total+=p,p&&(a.adler=c.check=c.flags?u(c.check,f,p,h-p):t(c.check,f,p,h-p)),p=j,(c.flags?m:d(m))!==c.check){a.msg="incorrect data check",c.mode=ma;break}m=0,n=0}c.mode=ka;case ka:if(c.wrap&&c.flags){for(;n<32;){if(0===i)break a;i--,m+=e[g++]<<n,n+=8}if(m!==(4294967295&c.total)){a.msg="incorrect length check",c.mode=ma;break}m=0,n=0}c.mode=la;case la:xa=E;break a;case ma:xa=H;break a;case na:return I;case oa:default:return G}return 
a.next_out=h,a.avail_out=j,a.next_in=g,a.avail_in=i,c.hold=m,c.bits=n,(c.wsize||p!==a.avail_out&&c.mode<ma&&(c.mode<ja||b!==A))&&l(a,a.output,a.next_out,p-a.avail_out)?(c.mode=na,I):(o-=a.avail_in,p-=a.avail_out,a.total_in+=o,a.total_out+=p,c.total+=p,c.wrap&&p&&(a.adler=c.check=c.flags?u(c.check,f,p,a.next_out-p):t(c.check,f,p,a.next_out-p)),a.data_type=c.bits+(c.last?64:0)+(c.mode===W?128:0)+(c.mode===ca||c.mode===Z?256:0),(0===o&&0===p||b===A)&&xa===D&&(xa=J),xa)}function n(a){if(!a||!a.state)return G;var b=a.state;return b.window&&(b.window=null),a.state=null,D}function o(a,b){var c;return a&&a.state?(c=a.state,0===(2&c.wrap)?G:(c.head=b,b.done=!1,D)):G}function p(a,b){var c,d,e,f=b.length;return a&&a.state?(c=a.state,0!==c.wrap&&c.mode!==V?G:c.mode===V&&(d=1,d=t(d,b,f,0),d!==c.check)?H:(e=l(a,b,f,f))?(c.mode=na,I):(c.havedict=1,D)):G}var q,r,s=a("../utils/common"),t=a("./adler32"),u=a("./crc32"),v=a("./inffast"),w=a("./inftrees"),x=0,y=1,z=2,A=4,B=5,C=6,D=0,E=1,F=2,G=-2,H=-3,I=-4,J=-5,K=8,L=1,M=2,N=3,O=4,P=5,Q=6,R=7,S=8,T=9,U=10,V=11,W=12,X=13,Y=14,Z=15,$=16,_=17,aa=18,ba=19,ca=20,da=21,ea=22,fa=23,ga=24,ha=25,ia=26,ja=27,ka=28,la=29,ma=30,na=31,oa=32,pa=852,qa=592,ra=15,sa=ra,ta=!0;c.inflateReset=g,c.inflateReset2=h,c.inflateResetKeep=f,c.inflateInit=j,c.inflateInit2=i,c.inflate=m,c.inflateEnd=n,c.inflateGetHeader=o,c.inflateSetDictionary=p,c.inflateInfo="pako inflate (from Nodeca project)"},{"../utils/common":62,"./adler32":64,"./crc32":66,"./inffast":69,"./inftrees":71}],71:[function(a,b,c){"use strict";var d=a("../utils/common"),e=15,f=852,g=592,h=0,i=1,j=2,k=[3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258,0,0],l=[16,16,16,16,16,16,16,16,17,17,17,17,18,18,18,18,19,19,19,19,20,20,20,20,21,21,21,21,16,72,78],m=[1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0],n=[16,16,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24,25,25,26,26,27,27,28,28,29,29,64,64];b.exports=function(a,b,c,o,p,q,r,s){var t,u,v,w,x,y,z,A,B,C=s.bits,D=0,E=0,F=0,G=0,H=0,I=0,J=0,K=0,L=0,M=0,N=null,O=0,P=new d.Buf16(e+1),Q=new d.Buf16(e+1),R=null,S=0;for(D=0;D<=e;D++)P[D]=0;for(E=0;E<o;E++)P[b[c+E]]++;for(H=C,G=e;G>=1&&0===P[G];G--);if(H>G&&(H=G),0===G)return p[q++]=20971520,p[q++]=20971520,s.bits=1,0;for(F=1;F<G&&0===P[F];F++);for(H<F&&(H=F),K=1,D=1;D<=e;D++)if(K<<=1,K-=P[D],K<0)return-1;if(K>0&&(a===h||1!==G))return-1;for(Q[1]=0,D=1;D<e;D++)Q[D+1]=Q[D]+P[D];for(E=0;E<o;E++)0!==b[c+E]&&(r[Q[b[c+E]]++]=E);if(a===h?(N=R=r,y=19):a===i?(N=k,O-=257,R=l,S-=257,y=256):(N=m,R=n,y=-1),M=0,E=0,D=F,x=q,I=H,J=0,v=-1,L=1<<H,w=L-1,a===i&&L>f||a===j&&L>g)return 1;for(;;){z=D-J,r[E]<y?(A=0,B=r[E]):r[E]>y?(A=R[S+r[E]],B=N[O+r[E]]):(A=96,B=0),t=1<<D-J,u=1<<I,F=u;do u-=t,p[x+(M>>J)+u]=z<<24|A<<16|B|0;while(0!==u);for(t=1<<D-1;M&t;)t>>=1;if(0!==t?(M&=t-1,M+=t):M=0,E++,0===--P[D]){if(D===G)break;D=b[c+r[E]]}if(D>H&&(M&w)!==v){for(0===J&&(J=H),x+=F,I=D-J,K=1<<I;I+J<G&&(K-=P[I+J],!(K<=0));)I++,K<<=1;if(L+=1<<I,a===i&&L>f||a===j&&L>g)return 1;v=M&w,p[v]=H<<24|I<<16|x-q|0}}return 0!==M&&(p[x+M]=D-J<<24|64<<16|0),s.bits=H,0}},{"../utils/common":62}],72:[function(a,b,c){"use strict";b.exports={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"}},{}],73:[function(a,b,c){"use strict";function d(a){for(var b=a.length;--b>=0;)a[b]=0}function 
e(a,b,c,d,e){this.static_tree=a,this.extra_bits=b,this.extra_base=c,this.elems=d,this.max_length=e,this.has_stree=a&&a.length}function f(a,b){this.dyn_tree=a,this.max_code=0,this.stat_desc=b}function g(a){return a<256?ia[a]:ia[256+(a>>>7)]}function h(a,b){a.pending_buf[a.pending++]=255&b,a.pending_buf[a.pending++]=b>>>8&255}function i(a,b,c){a.bi_valid>X-c?(a.bi_buf|=b<<a.bi_valid&65535,h(a,a.bi_buf),a.bi_buf=b>>X-a.bi_valid,a.bi_valid+=c-X):(a.bi_buf|=b<<a.bi_valid&65535,a.bi_valid+=c)}function j(a,b,c){i(a,c[2*b],c[2*b+1])}function k(a,b){var c=0;do c|=1&a,a>>>=1,c<<=1;while(--b>0);return c>>>1}function l(a){16===a.bi_valid?(h(a,a.bi_buf),a.bi_buf=0,a.bi_valid=0):a.bi_valid>=8&&(a.pending_buf[a.pending++]=255&a.bi_buf,a.bi_buf>>=8,a.bi_valid-=8)}function m(a,b){var c,d,e,f,g,h,i=b.dyn_tree,j=b.max_code,k=b.stat_desc.static_tree,l=b.stat_desc.has_stree,m=b.stat_desc.extra_bits,n=b.stat_desc.extra_base,o=b.stat_desc.max_length,p=0;for(f=0;f<=W;f++)a.bl_count[f]=0;for(i[2*a.heap[a.heap_max]+1]=0,
c=a.heap_max+1;c<V;c++)d=a.heap[c],f=i[2*i[2*d+1]+1]+1,f>o&&(f=o,p++),i[2*d+1]=f,d>j||(a.bl_count[f]++,g=0,d>=n&&(g=m[d-n]),h=i[2*d],a.opt_len+=h*(f+g),l&&(a.static_len+=h*(k[2*d+1]+g)));if(0!==p){do{for(f=o-1;0===a.bl_count[f];)f--;a.bl_count[f]--,a.bl_count[f+1]+=2,a.bl_count[o]--,p-=2}while(p>0);for(f=o;0!==f;f--)for(d=a.bl_count[f];0!==d;)e=a.heap[--c],e>j||(i[2*e+1]!==f&&(a.opt_len+=(f-i[2*e+1])*i[2*e],i[2*e+1]=f),d--)}}function n(a,b,c){var d,e,f=new Array(W+1),g=0;for(d=1;d<=W;d++)f[d]=g=g+c[d-1]<<1;for(e=0;e<=b;e++){var h=a[2*e+1];0!==h&&(a[2*e]=k(f[h]++,h))}}function o(){var a,b,c,d,f,g=new Array(W+1);for(c=0,d=0;d<Q-1;d++)for(ka[d]=c,a=0;a<1<<ba[d];a++)ja[c++]=d;for(ja[c-1]=d,f=0,d=0;d<16;d++)for(la[d]=f,a=0;a<1<<ca[d];a++)ia[f++]=d;for(f>>=7;d<T;d++)for(la[d]=f<<7,a=0;a<1<<ca[d]-7;a++)ia[256+f++]=d;for(b=0;b<=W;b++)g[b]=0;for(a=0;a<=143;)ga[2*a+1]=8,a++,g[8]++;for(;a<=255;)ga[2*a+1]=9,a++,g[9]++;for(;a<=279;)ga[2*a+1]=7,a++,g[7]++;for(;a<=287;)ga[2*a+1]=8,a++,g[8]++;for(n(ga,S+1,g),a=0;a<T;a++)ha[2*a+1]=5,ha[2*a]=k(a,5);ma=new e(ga,ba,R+1,S,W),na=new e(ha,ca,0,T,W),oa=new e(new Array(0),da,0,U,Y)}function p(a){var b;for(b=0;b<S;b++)a.dyn_ltree[2*b]=0;for(b=0;b<T;b++)a.dyn_dtree[2*b]=0;for(b=0;b<U;b++)a.bl_tree[2*b]=0;a.dyn_ltree[2*Z]=1,a.opt_len=a.static_len=0,a.last_lit=a.matches=0}function q(a){a.bi_valid>8?h(a,a.bi_buf):a.bi_valid>0&&(a.pending_buf[a.pending++]=a.bi_buf),a.bi_buf=0,a.bi_valid=0}function r(a,b,c,d){q(a),d&&(h(a,c),h(a,~c)),G.arraySet(a.pending_buf,a.window,b,c,a.pending),a.pending+=c}function s(a,b,c,d){var e=2*b,f=2*c;return a[e]<a[f]||a[e]===a[f]&&d[b]<=d[c]}function t(a,b,c){for(var d=a.heap[c],e=c<<1;e<=a.heap_len&&(e<a.heap_len&&s(b,a.heap[e+1],a.heap[e],a.depth)&&e++,!s(b,d,a.heap[e],a.depth));)a.heap[c]=a.heap[e],c=e,e<<=1;a.heap[c]=d}function u(a,b,c){var d,e,f,h,k=0;if(0!==a.last_lit)do d=a.pending_buf[a.d_buf+2*k]<<8|a.pending_buf[a.d_buf+2*k+1],e=a.pending_buf[a.l_buf+k],k++,0===d?j(a,e,b):(f=ja[e],j(a,f+R+1,b),h=ba[f],0!==h&&(e-=ka[f],i(a,e,h)),d--,f=g(d),j(a,f,c),h=ca[f],0!==h&&(d-=la[f],i(a,d,h)));while(k<a.last_lit);j(a,Z,b)}function v(a,b){var c,d,e,f=b.dyn_tree,g=b.stat_desc.static_tree,h=b.stat_desc.has_stree,i=b.stat_desc.elems,j=-1;for(a.heap_len=0,a.heap_max=V,c=0;c<i;c++)0!==f[2*c]?(a.heap[++a.heap_len]=j=c,a.depth[c]=0):f[2*c+1]=0;for(;a.heap_len<2;)e=a.heap[++a.heap_len]=j<2?++j:0,f[2*e]=1,a.depth[e]=0,a.opt_len--,h&&(a.static_len-=g[2*e+1]);for(b.max_code=j,c=a.heap_len>>1;c>=1;c--)t(a,f,c);e=i;do c=a.heap[1],a.heap[1]=a.heap[a.heap_len--],t(a,f,1),d=a.heap[1],a.heap[--a.heap_max]=c,a.heap[--a.heap_max]=d,f[2*e]=f[2*c]+f[2*d],a.depth[e]=(a.depth[c]>=a.depth[d]?a.depth[c]:a.depth[d])+1,f[2*c+1]=f[2*d+1]=e,a.heap[1]=e++,t(a,f,1);while(a.heap_len>=2);a.heap[--a.heap_max]=a.heap[1],m(a,b),n(f,j,a.bl_count)}function w(a,b,c){var d,e,f=-1,g=b[1],h=0,i=7,j=4;for(0===g&&(i=138,j=3),b[2*(c+1)+1]=65535,d=0;d<=c;d++)e=g,g=b[2*(d+1)+1],++h<i&&e===g||(h<j?a.bl_tree[2*e]+=h:0!==e?(e!==f&&a.bl_tree[2*e]++,a.bl_tree[2*$]++):h<=10?a.bl_tree[2*_]++:a.bl_tree[2*aa]++,h=0,f=e,0===g?(i=138,j=3):e===g?(i=6,j=3):(i=7,j=4))}function x(a,b,c){var d,e,f=-1,g=b[1],h=0,k=7,l=4;for(0===g&&(k=138,l=3),d=0;d<=c;d++)if(e=g,g=b[2*(d+1)+1],!(++h<k&&e===g)){if(h<l){do j(a,e,a.bl_tree);while(0!==--h)}else 0!==e?(e!==f&&(j(a,e,a.bl_tree),h--),j(a,$,a.bl_tree),i(a,h-3,2)):h<=10?(j(a,_,a.bl_tree),i(a,h-3,3)):(j(a,aa,a.bl_tree),i(a,h-11,7));h=0,f=e,0===g?(k=138,l=3):e===g?(k=6,l=3):(k=7,l=4)}}function y(a){var 
b;for(w(a,a.dyn_ltree,a.l_desc.max_code),w(a,a.dyn_dtree,a.d_desc.max_code),v(a,a.bl_desc),b=U-1;b>=3&&0===a.bl_tree[2*ea[b]+1];b--);return a.opt_len+=3*(b+1)+5+5+4,b}function z(a,b,c,d){var e;for(i(a,b-257,5),i(a,c-1,5),i(a,d-4,4),e=0;e<d;e++)i(a,a.bl_tree[2*ea[e]+1],3);x(a,a.dyn_ltree,b-1),x(a,a.dyn_dtree,c-1)}function A(a){var b,c=4093624447;for(b=0;b<=31;b++,c>>>=1)if(1&c&&0!==a.dyn_ltree[2*b])return I;if(0!==a.dyn_ltree[18]||0!==a.dyn_ltree[20]||0!==a.dyn_ltree[26])return J;for(b=32;b<R;b++)if(0!==a.dyn_ltree[2*b])return J;return I}function B(a){pa||(o(),pa=!0),a.l_desc=new f(a.dyn_ltree,ma),a.d_desc=new f(a.dyn_dtree,na),a.bl_desc=new f(a.bl_tree,oa),a.bi_buf=0,a.bi_valid=0,p(a)}function C(a,b,c,d){i(a,(L<<1)+(d?1:0),3),r(a,b,c,!0)}function D(a){i(a,M<<1,3),j(a,Z,ga),l(a)}function E(a,b,c,d){var e,f,g=0;a.level>0?(a.strm.data_type===K&&(a.strm.data_type=A(a)),v(a,a.l_desc),v(a,a.d_desc),g=y(a),e=a.opt_len+3+7>>>3,f=a.static_len+3+7>>>3,f<=e&&(e=f)):e=f=c+5,c+4<=e&&b!==-1?C(a,b,c,d):a.strategy===H||f===e?(i(a,(M<<1)+(d?1:0),3),u(a,ga,ha)):(i(a,(N<<1)+(d?1:0),3),z(a,a.l_desc.max_code+1,a.d_desc.max_code+1,g+1),u(a,a.dyn_ltree,a.dyn_dtree)),p(a),d&&q(a)}function F(a,b,c){return a.pending_buf[a.d_buf+2*a.last_lit]=b>>>8&255,a.pending_buf[a.d_buf+2*a.last_lit+1]=255&b,a.pending_buf[a.l_buf+a.last_lit]=255&c,a.last_lit++,0===b?a.dyn_ltree[2*c]++:(a.matches++,b--,a.dyn_ltree[2*(ja[c]+R+1)]++,a.dyn_dtree[2*g(b)]++),a.last_lit===a.lit_bufsize-1}var G=a("../utils/common"),H=4,I=0,J=1,K=2,L=0,M=1,N=2,O=3,P=258,Q=29,R=256,S=R+1+Q,T=30,U=19,V=2*S+1,W=15,X=16,Y=7,Z=256,$=16,_=17,aa=18,ba=[0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0],ca=[0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13],da=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7],ea=[16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15],fa=512,ga=new Array(2*(S+2));d(ga);var ha=new Array(2*T);d(ha);var ia=new Array(fa);d(ia);var ja=new Array(P-O+1);d(ja);var ka=new Array(Q);d(ka);var la=new Array(T);d(la);var ma,na,oa,pa=!1;c._tr_init=B,c._tr_stored_block=C,c._tr_flush_block=E,c._tr_tally=F,c._tr_align=D},{"../utils/common":62}],74:[function(a,b,c){"use strict";function d(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0}b.exports=d},{}]},{},[10])(10)}); | zhiyao-huihuxi-jiuneng-zuomingxiang | /zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1.tar.gz/zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1/ZhiyaoHuihuxiJiunengZuomingxiang/js/libs/zip.min.js | zip.min.js |
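The bundle above ends with pako's zlib core: module 67 is the deflate engine (exporting deflateInit/deflateInit2/deflate/deflateEnd/deflateSetDictionary), module 70 the matching inflate entry points, module 69 the fast-path inflate loop, module 65 the Z_* flush and status constants, and module 74 the ZStream container that carries input/next_in/avail_in and output/next_out/avail_out between calls. A minimal sketch of how those exports fit together, assuming the same modules are loaded unbundled from the pako package (inside zip.min.js they are reachable only through the bundle's entry point):

// Hedged sketch: one-shot deflate through pako's zlib-level API, i.e. the
// same modules bundled above (deflate = module 67, zstream = module 74,
// constants = module 65). The require paths assume the unbundled pako
// package layout.
var zlibDeflate = require('pako/lib/zlib/deflate');
var ZStream = require('pako/lib/zlib/zstream');
var consts = require('pako/lib/zlib/constants');

var strm = new ZStream();
strm.input = new Uint8Array([104, 101, 108, 108, 111]); // "hello"
strm.next_in = 0;
strm.avail_in = strm.input.length;
strm.output = new Uint8Array(1024); // assumption: output fits in one chunk
strm.next_out = 0;
strm.avail_out = strm.output.length;

// Z_DEFAULT_COMPRESSION (-1) is remapped to level 6 inside deflateInit2.
if (zlibDeflate.deflateInit(strm, consts.Z_DEFAULT_COMPRESSION) !== consts.Z_OK) {
	throw new Error(strm.msg || 'deflateInit failed');
}
// With Z_FINISH and enough output space, a single call ends the stream.
if (zlibDeflate.deflate(strm, consts.Z_FINISH) !== consts.Z_STREAM_END) {
	throw new Error(strm.msg || 'deflate failed');
}
zlibDeflate.deflateEnd(strm);
var compressed = strm.output.subarray(0, strm.next_out); // zlib-wrapped bytes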
(function () {
'use strict';
var isCommonjs = typeof module !== 'undefined' && module.exports;
var keyboardAllowed = typeof Element !== 'undefined' && 'ALLOW_KEYBOARD_INPUT' in Element;
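	// Probe the document for whichever vendor-prefixed Fullscreen API is
	// available, mapping its method/property/event names onto the standard
	// (unprefixed) names; yields false when no variant is supported.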
var fn = (function () {
var val;
var fnMap = [
[
'requestFullscreen',
'exitFullscreen',
'fullscreenElement',
'fullscreenEnabled',
'fullscreenchange',
'fullscreenerror'
],
// new WebKit
[
'webkitRequestFullscreen',
'webkitExitFullscreen',
'webkitFullscreenElement',
'webkitFullscreenEnabled',
'webkitfullscreenchange',
'webkitfullscreenerror'
],
// old WebKit (Safari 5.1)
[
'webkitRequestFullScreen',
'webkitCancelFullScreen',
'webkitCurrentFullScreenElement',
'webkitCancelFullScreen',
'webkitfullscreenchange',
'webkitfullscreenerror'
],
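			// Gecko (Firefox)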
[
'mozRequestFullScreen',
'mozCancelFullScreen',
'mozFullScreenElement',
'mozFullScreenEnabled',
'mozfullscreenchange',
'mozfullscreenerror'
],
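			// IE 11 / old MS Edge (MS-prefixed)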
[
'msRequestFullscreen',
'msExitFullscreen',
'msFullscreenElement',
'msFullscreenEnabled',
'MSFullscreenChange',
'MSFullscreenError'
]
];
var i = 0;
var l = fnMap.length;
var ret = {};
		for (; i < l; i++) {
			val = fnMap[i];
			if (val && val[1] in document) {
				// Map each vendor-prefixed name onto the corresponding
				// standard name from fnMap[0]; a separate counter keeps
				// the outer loop index intact.
				for (var j = 0; j < val.length; j++) {
					ret[fnMap[0][j]] = val[j];
				}
				return ret;
			}
		}
return false;
})();
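	// Public wrapper: request/exit/toggle drive the detected API, while
	// `raw` exposes the resolved vendor-specific names (useful for binding
	// the fullscreenchange/fullscreenerror events).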
var screenfull = {
request: function (elem) {
var request = fn.requestFullscreen;
elem = elem || document.documentElement;
// Work around Safari 5.1 bug: reports support for
// keyboard in fullscreen even though it doesn't.
// Browser sniffing, since the alternative with
// setTimeout is even worse.
if (/5\.1[\.\d]* Safari/.test(navigator.userAgent)) {
elem[request]();
} else {
elem[request](keyboardAllowed && Element.ALLOW_KEYBOARD_INPUT);
}
},
exit: function () {
document[fn.exitFullscreen]();
},
toggle: function (elem) {
if (this.isFullscreen) {
this.exit();
} else {
this.request(elem);
}
},
raw: fn
};
if (!fn) {
if (isCommonjs) {
module.exports = false;
} else {
window.screenfull = false;
}
return;
}
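	// Live getters: fullscreen state can change outside this wrapper's
	// control, so it is re-read from the document on every access.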
Object.defineProperties(screenfull, {
isFullscreen: {
get: function () {
return !!document[fn.fullscreenElement];
}
},
element: {
enumerable: true,
get: function () {
return document[fn.fullscreenElement];
}
},
enabled: {
enumerable: true,
get: function () {
// Coerce to boolean in case of old WebKit
return !!document[fn.fullscreenEnabled];
}
}
});
if (isCommonjs) {
module.exports = screenfull;
} else {
window.screenfull = screenfull;
}
})(); | zhiyao-huihuxi-jiuneng-zuomingxiang | /zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1.tar.gz/zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1/ZhiyaoHuihuxiJiunengZuomingxiang/js/libs/screenfull.js | screenfull.js |
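A short usage sketch for the screenfull wrapper above. The element ids are hypothetical; enabled, toggle, isFullscreen, element and raw.fullscreenchange are all defined in the code just shown:

// Hedged usage sketch; #player and #fs-button are hypothetical elements.
if (window.screenfull && screenfull.enabled) {
	var player = document.getElementById('player');

	document.getElementById('fs-button').addEventListener('click', function () {
		// request() when not fullscreen, exit() otherwise (see toggle above).
		screenfull.toggle(player);
	});

	// `raw` holds the vendor-specific names resolved by the fnMap probe, so
	// the change event can be bound without knowing the browser prefix.
	document.addEventListener(screenfull.raw.fullscreenchange, function () {
		console.log('fullscreen:', screenfull.isFullscreen, screenfull.element);
	});
}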
!function(a){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=a();else if("function"==typeof define&&define.amd)define([],a);else{var b;b="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this,b.localforage=a()}}(function(){return function a(b,c,d){function e(g,h){if(!c[g]){if(!b[g]){var i="function"==typeof require&&require;if(!h&&i)return i(g,!0);if(f)return f(g,!0);var j=new Error("Cannot find module '"+g+"'");throw j.code="MODULE_NOT_FOUND",j}var k=c[g]={exports:{}};b[g][0].call(k.exports,function(a){var c=b[g][1][a];return e(c?c:a)},k,k.exports,a,b,c,d)}return c[g].exports}for(var f="function"==typeof require&&require,g=0;g<d.length;g++)e(d[g]);return e}({1:[function(a,b,c){(function(a){"use strict";function c(){k=!0;for(var a,b,c=l.length;c;){for(b=l,l=[],a=-1;++a<c;)b[a]();c=l.length}k=!1}function d(a){1!==l.push(a)||k||e()}var e,f=a.MutationObserver||a.WebKitMutationObserver;if(f){var g=0,h=new f(c),i=a.document.createTextNode("");h.observe(i,{characterData:!0}),e=function(){i.data=g=++g%2}}else if(a.setImmediate||"undefined"==typeof a.MessageChannel)e="document"in a&&"onreadystatechange"in a.document.createElement("script")?function(){var b=a.document.createElement("script");b.onreadystatechange=function(){c(),b.onreadystatechange=null,b.parentNode.removeChild(b),b=null},a.document.documentElement.appendChild(b)}:function(){setTimeout(c,0)};else{var j=new a.MessageChannel;j.port1.onmessage=c,e=function(){j.port2.postMessage(0)}}var k,l=[];b.exports=d}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],2:[function(a,b,c){"use strict";function d(){}function e(a){if("function"!=typeof a)throw new TypeError("resolver must be a function");this.state=s,this.queue=[],this.outcome=void 0,a!==d&&i(this,a)}function f(a,b,c){this.promise=a,"function"==typeof b&&(this.onFulfilled=b,this.callFulfilled=this.otherCallFulfilled),"function"==typeof c&&(this.onRejected=c,this.callRejected=this.otherCallRejected)}function g(a,b,c){o(function(){var d;try{d=b(c)}catch(b){return p.reject(a,b)}d===a?p.reject(a,new TypeError("Cannot resolve promise with itself")):p.resolve(a,d)})}function h(a){var b=a&&a.then;if(a&&"object"==typeof a&&"function"==typeof b)return function(){b.apply(a,arguments)}}function i(a,b){function c(b){f||(f=!0,p.reject(a,b))}function d(b){f||(f=!0,p.resolve(a,b))}function e(){b(d,c)}var f=!1,g=j(e);"error"===g.status&&c(g.value)}function j(a,b){var c={};try{c.value=a(b),c.status="success"}catch(a){c.status="error",c.value=a}return c}function k(a){return a instanceof this?a:p.resolve(new this(d),a)}function l(a){var b=new this(d);return p.reject(b,a)}function m(a){function b(a,b){function d(a){g[b]=a,++h!==e||f||(f=!0,p.resolve(j,g))}c.resolve(a).then(d,function(a){f||(f=!0,p.reject(j,a))})}var c=this;if("[object Array]"!==Object.prototype.toString.call(a))return this.reject(new TypeError("must be an array"));var e=a.length,f=!1;if(!e)return this.resolve([]);for(var g=new Array(e),h=0,i=-1,j=new this(d);++i<e;)b(a[i],i);return j}function n(a){function b(a){c.resolve(a).then(function(a){f||(f=!0,p.resolve(h,a))},function(a){f||(f=!0,p.reject(h,a))})}var c=this;if("[object Array]"!==Object.prototype.toString.call(a))return this.reject(new TypeError("must be an array"));var e=a.length,f=!1;if(!e)return this.resolve([]);for(var g=-1,h=new this(d);++g<e;)b(a[g]);return h}var 
o=a(1),p={},q=["REJECTED"],r=["FULFILLED"],s=["PENDING"];b.exports=c=e,e.prototype.catch=function(a){return this.then(null,a)},e.prototype.then=function(a,b){if("function"!=typeof a&&this.state===r||"function"!=typeof b&&this.state===q)return this;var c=new this.constructor(d);if(this.state!==s){var e=this.state===r?a:b;g(c,e,this.outcome)}else this.queue.push(new f(c,a,b));return c},f.prototype.callFulfilled=function(a){p.resolve(this.promise,a)},f.prototype.otherCallFulfilled=function(a){g(this.promise,this.onFulfilled,a)},f.prototype.callRejected=function(a){p.reject(this.promise,a)},f.prototype.otherCallRejected=function(a){g(this.promise,this.onRejected,a)},p.resolve=function(a,b){var c=j(h,b);if("error"===c.status)return p.reject(a,c.value);var d=c.value;if(d)i(a,d);else{a.state=r,a.outcome=b;for(var e=-1,f=a.queue.length;++e<f;)a.queue[e].callFulfilled(b)}return a},p.reject=function(a,b){a.state=q,a.outcome=b;for(var c=-1,d=a.queue.length;++c<d;)a.queue[c].callRejected(b);return a},c.resolve=k,c.reject=l,c.all=m,c.race=n},{1:1}],3:[function(a,b,c){(function(b){"use strict";"function"!=typeof b.Promise&&(b.Promise=a(2))}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{2:2}],4:[function(a,b,c){"use strict";function d(a,b){if(!(a instanceof b))throw new TypeError("Cannot call a class as a function")}function e(){try{if("undefined"!=typeof indexedDB)return indexedDB;if("undefined"!=typeof webkitIndexedDB)return webkitIndexedDB;if("undefined"!=typeof mozIndexedDB)return mozIndexedDB;if("undefined"!=typeof OIndexedDB)return OIndexedDB;if("undefined"!=typeof msIndexedDB)return msIndexedDB}catch(a){}}function f(){try{if(!ga)return!1;var a="undefined"!=typeof openDatabase&&/(Safari|iPhone|iPad|iPod)/.test(navigator.userAgent)&&!/Chrome/.test(navigator.userAgent)&&!/BlackBerry/.test(navigator.platform),b="function"==typeof fetch&&fetch.toString().indexOf("[native code")!==-1;return(!a||b)&&"undefined"!=typeof indexedDB&&"undefined"!=typeof IDBKeyRange}catch(a){return!1}}function g(){return"function"==typeof openDatabase}function h(){try{return"undefined"!=typeof localStorage&&"setItem"in localStorage&&localStorage.setItem}catch(a){return!1}}function i(a,b){a=a||[],b=b||{};try{return new Blob(a,b)}catch(f){if("TypeError"!==f.name)throw f;for(var c="undefined"!=typeof BlobBuilder?BlobBuilder:"undefined"!=typeof MSBlobBuilder?MSBlobBuilder:"undefined"!=typeof MozBlobBuilder?MozBlobBuilder:WebKitBlobBuilder,d=new c,e=0;e<a.length;e+=1)d.append(a[e]);return d.getBlob(b.type)}}function j(a,b){b&&a.then(function(a){b(null,a)},function(a){b(a)})}function k(a,b,c){"function"==typeof b&&a.then(b),"function"==typeof c&&a.catch(c)}function l(a){for(var b=a.length,c=new ArrayBuffer(b),d=new Uint8Array(c),e=0;e<b;e++)d[e]=a.charCodeAt(e);return c}function m(a){return new ja(function(b){var c=a.transaction(ka,"readwrite"),d=i([""]);c.objectStore(ka).put(d,"key"),c.onabort=function(a){a.preventDefault(),a.stopPropagation(),b(!1)},c.oncomplete=function(){var a=navigator.userAgent.match(/Chrome\/(\d+)/),c=navigator.userAgent.match(/Edge\//);b(c||!a||parseInt(a[1],10)>=43)}}).catch(function(){return!1})}function n(a){return"boolean"==typeof ha?ja.resolve(ha):m(a).then(function(a){return ha=a})}function o(a){var b=ia[a.name],c={};c.promise=new ja(function(a){c.resolve=a}),b.deferredOperations.push(c),b.dbReady?b.dbReady=b.dbReady.then(function(){return c.promise}):b.dbReady=c.promise}function p(a){var 
b=ia[a.name],c=b.deferredOperations.pop();c&&c.resolve()}function q(a,b){return new ja(function(c,d){if(a.db){if(!b)return c(a.db);o(a),a.db.close()}var e=[a.name];b&&e.push(a.version);var f=ga.open.apply(ga,e);b&&(f.onupgradeneeded=function(b){var c=f.result;try{c.createObjectStore(a.storeName),b.oldVersion<=1&&c.createObjectStore(ka)}catch(c){if("ConstraintError"!==c.name)throw c;console.warn('The database "'+a.name+'" has been upgraded from version '+b.oldVersion+" to version "+b.newVersion+', but the storage "'+a.storeName+'" already exists.')}}),f.onerror=function(a){a.preventDefault(),d(f.error)},f.onsuccess=function(){c(f.result),p(a)}})}function r(a){return q(a,!1)}function s(a){return q(a,!0)}function t(a,b){if(!a.db)return!0;var c=!a.db.objectStoreNames.contains(a.storeName),d=a.version<a.db.version,e=a.version>a.db.version;if(d&&(a.version!==b&&console.warn('The database "'+a.name+"\" can't be downgraded from version "+a.db.version+" to version "+a.version+"."),a.version=a.db.version),e||c){if(c){var f=a.db.version+1;f>a.version&&(a.version=f)}return!0}return!1}function u(a){return new ja(function(b,c){var d=new FileReader;d.onerror=c,d.onloadend=function(c){var d=btoa(c.target.result||"");b({__local_forage_encoded_blob:!0,data:d,type:a.type})},d.readAsBinaryString(a)})}function v(a){var b=l(atob(a.data));return i([b],{type:a.type})}function w(a){return a&&a.__local_forage_encoded_blob}function x(a){var b=this,c=b._initReady().then(function(){var a=ia[b._dbInfo.name];if(a&&a.dbReady)return a.dbReady});return k(c,a,a),c}function y(a){function b(){return ja.resolve()}var c=this,d={db:null};if(a)for(var e in a)d[e]=a[e];ia||(ia={});var f=ia[d.name];f||(f={forages:[],db:null,dbReady:null,deferredOperations:[]},ia[d.name]=f),f.forages.push(c),c._initReady||(c._initReady=c.ready,c.ready=x);for(var g=[],h=0;h<f.forages.length;h++){var i=f.forages[h];i!==c&&g.push(i._initReady().catch(b))}var j=f.forages.slice(0);return ja.all(g).then(function(){return d.db=f.db,r(d)}).then(function(a){return d.db=a,t(d,c._defaultConfig.version)?s(d):a}).then(function(a){d.db=f.db=a,c._dbInfo=d;for(var b=0;b<j.length;b++){var e=j[b];e!==c&&(e._dbInfo.db=d.db,e._dbInfo.version=d.version)}})}function z(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo,f=e.db.transaction(e.storeName,"readonly").objectStore(e.storeName),g=f.get(a);g.onsuccess=function(){var a=g.result;void 0===a&&(a=null),w(a)&&(a=v(a)),b(a)},g.onerror=function(){d(g.error)}}).catch(d)});return j(d,b),d}function A(a,b){var c=this,d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo,f=e.db.transaction(e.storeName,"readonly").objectStore(e.storeName),g=f.openCursor(),h=1;g.onsuccess=function(){var c=g.result;if(c){var d=c.value;w(d)&&(d=v(d));var e=a(d,c.key,h++);void 0!==e?b(e):c.continue()}else b()},g.onerror=function(){d(g.error)}}).catch(d)});return j(d,b),d}function B(a,b,c){var d=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var e=new ja(function(c,e){var f;d.ready().then(function(){return f=d._dbInfo,"[object Blob]"===la.call(b)?n(f.db).then(function(a){return a?b:u(b)}):b}).then(function(b){var d=f.db.transaction(f.storeName,"readwrite"),g=d.objectStore(f.storeName),h=g.put(b,a);null===b&&(b=void 0),d.oncomplete=function(){void 0===b&&(b=null),c(b)},d.onabort=d.onerror=function(){var a=h.error?h.error:h.transaction.error;e(a)}}).catch(e)});return 
j(e,c),e}function C(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo,f=e.db.transaction(e.storeName,"readwrite"),g=f.objectStore(e.storeName),h=g.delete(a);f.oncomplete=function(){b()},f.onerror=function(){d(h.error)},f.onabort=function(){var a=h.error?h.error:h.transaction.error;d(a)}}).catch(d)});return j(d,b),d}function D(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo,e=d.db.transaction(d.storeName,"readwrite"),f=e.objectStore(d.storeName),g=f.clear();e.oncomplete=function(){a()},e.onabort=e.onerror=function(){var a=g.error?g.error:g.transaction.error;c(a)}}).catch(c)});return j(c,a),c}function E(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo,e=d.db.transaction(d.storeName,"readonly").objectStore(d.storeName),f=e.count();f.onsuccess=function(){a(f.result)},f.onerror=function(){c(f.error)}}).catch(c)});return j(c,a),c}function F(a,b){var c=this,d=new ja(function(b,d){return a<0?void b(null):void c.ready().then(function(){var e=c._dbInfo,f=e.db.transaction(e.storeName,"readonly").objectStore(e.storeName),g=!1,h=f.openCursor();h.onsuccess=function(){var c=h.result;return c?void(0===a?b(c.key):g?b(c.key):(g=!0,c.advance(a))):void b(null)},h.onerror=function(){d(h.error)}}).catch(d)});return j(d,b),d}function G(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo,e=d.db.transaction(d.storeName,"readonly").objectStore(d.storeName),f=e.openCursor(),g=[];f.onsuccess=function(){var b=f.result;return b?(g.push(b.key),void b.continue()):void a(g)},f.onerror=function(){c(f.error)}}).catch(c)});return j(c,a),c}function H(a){var b,c,d,e,f,g=.75*a.length,h=a.length,i=0;"="===a[a.length-1]&&(g--,"="===a[a.length-2]&&g--);var j=new ArrayBuffer(g),k=new Uint8Array(j);for(b=0;b<h;b+=4)c=na.indexOf(a[b]),d=na.indexOf(a[b+1]),e=na.indexOf(a[b+2]),f=na.indexOf(a[b+3]),k[i++]=c<<2|d>>4,k[i++]=(15&d)<<4|e>>2,k[i++]=(3&e)<<6|63&f;return j}function I(a){var b,c=new Uint8Array(a),d="";for(b=0;b<c.length;b+=3)d+=na[c[b]>>2],d+=na[(3&c[b])<<4|c[b+1]>>4],d+=na[(15&c[b+1])<<2|c[b+2]>>6],d+=na[63&c[b+2]];return c.length%3===2?d=d.substring(0,d.length-1)+"=":c.length%3===1&&(d=d.substring(0,d.length-2)+"=="),d}function J(a,b){var c="";if(a&&(c=Ea.call(a)),a&&("[object ArrayBuffer]"===c||a.buffer&&"[object ArrayBuffer]"===Ea.call(a.buffer))){var d,e=qa;a instanceof ArrayBuffer?(d=a,e+=sa):(d=a.buffer,"[object Int8Array]"===c?e+=ua:"[object Uint8Array]"===c?e+=va:"[object Uint8ClampedArray]"===c?e+=wa:"[object Int16Array]"===c?e+=xa:"[object Uint16Array]"===c?e+=za:"[object Int32Array]"===c?e+=ya:"[object Uint32Array]"===c?e+=Aa:"[object Float32Array]"===c?e+=Ba:"[object Float64Array]"===c?e+=Ca:b(new Error("Failed to get type for BinaryArray"))),b(e+I(d))}else if("[object Blob]"===c){var f=new FileReader;f.onload=function(){var c=oa+a.type+"~"+I(this.result);b(qa+ta+c)},f.readAsArrayBuffer(a)}else try{b(JSON.stringify(a))}catch(c){console.error("Couldn't convert value into a JSON string: ",a),b(null,c)}}function K(a){if(a.substring(0,ra)!==qa)return JSON.parse(a);var b,c=a.substring(Da),d=a.substring(ra,Da);if(d===ta&&pa.test(c)){var e=c.match(pa);b=e[1],c=c.substring(e[0].length)}var f=H(c);switch(d){case sa:return f;case ta:return i([f],{type:b});case ua:return new Int8Array(f);case va:return new Uint8Array(f);case wa:return new Uint8ClampedArray(f);case xa:return new Int16Array(f);case za:return new 
Uint16Array(f);case ya:return new Int32Array(f);case Aa:return new Uint32Array(f);case Ba:return new Float32Array(f);case Ca:return new Float64Array(f);default:throw new Error("Unkown type: "+d)}}function L(a){var b=this,c={db:null};if(a)for(var d in a)c[d]="string"!=typeof a[d]?a[d].toString():a[d];var e=new ja(function(a,d){try{c.db=openDatabase(c.name,String(c.version),c.description,c.size)}catch(a){return d(a)}c.db.transaction(function(e){e.executeSql("CREATE TABLE IF NOT EXISTS "+c.storeName+" (id INTEGER PRIMARY KEY, key unique, value)",[],function(){b._dbInfo=c,a()},function(a,b){d(b)})})});return c.serializer=Fa,e}function M(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo;e.db.transaction(function(c){c.executeSql("SELECT * FROM "+e.storeName+" WHERE key = ? LIMIT 1",[a],function(a,c){var d=c.rows.length?c.rows.item(0).value:null;d&&(d=e.serializer.deserialize(d)),b(d)},function(a,b){d(b)})})}).catch(d)});return j(d,b),d}function N(a,b){var c=this,d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo;e.db.transaction(function(c){c.executeSql("SELECT * FROM "+e.storeName,[],function(c,d){for(var f=d.rows,g=f.length,h=0;h<g;h++){var i=f.item(h),j=i.value;if(j&&(j=e.serializer.deserialize(j)),j=a(j,i.key,h+1),void 0!==j)return void b(j)}b()},function(a,b){d(b)})})}).catch(d)});return j(d,b),d}function O(a,b,c,d){var e=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var f=new ja(function(f,g){e.ready().then(function(){void 0===b&&(b=null);var h=b,i=e._dbInfo;i.serializer.serialize(b,function(b,j){j?g(j):i.db.transaction(function(c){c.executeSql("INSERT OR REPLACE INTO "+i.storeName+" (key, value) VALUES (?, ?)",[a,b],function(){f(h)},function(a,b){g(b)})},function(b){if(b.code===b.QUOTA_ERR){if(d>0)return void f(O.apply(e,[a,h,c,d-1]));g(b)}})})}).catch(g)});return j(f,c),f}function P(a,b,c){return O.apply(this,[a,b,c,1])}function Q(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo;e.db.transaction(function(c){c.executeSql("DELETE FROM "+e.storeName+" WHERE key = ?",[a],function(){b()},function(a,b){d(b)})})}).catch(d)});return j(d,b),d}function R(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo;d.db.transaction(function(b){b.executeSql("DELETE FROM "+d.storeName,[],function(){a()},function(a,b){c(b)})})}).catch(c)});return j(c,a),c}function S(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo;d.db.transaction(function(b){b.executeSql("SELECT COUNT(key) as c FROM "+d.storeName,[],function(b,c){var d=c.rows.item(0).c;a(d)},function(a,b){c(b)})})}).catch(c)});return j(c,a),c}function T(a,b){var c=this,d=new ja(function(b,d){c.ready().then(function(){var e=c._dbInfo;e.db.transaction(function(c){c.executeSql("SELECT key FROM "+e.storeName+" WHERE id = ? 
LIMIT 1",[a+1],function(a,c){var d=c.rows.length?c.rows.item(0).key:null;b(d)},function(a,b){d(b)})})}).catch(d)});return j(d,b),d}function U(a){var b=this,c=new ja(function(a,c){b.ready().then(function(){var d=b._dbInfo;d.db.transaction(function(b){b.executeSql("SELECT key FROM "+d.storeName,[],function(b,c){for(var d=[],e=0;e<c.rows.length;e++)d.push(c.rows.item(e).key);a(d)},function(a,b){c(b)})})}).catch(c)});return j(c,a),c}function V(a){var b=this,c={};if(a)for(var d in a)c[d]=a[d];return c.keyPrefix=c.name+"/",c.storeName!==b._defaultConfig.storeName&&(c.keyPrefix+=c.storeName+"/"),b._dbInfo=c,c.serializer=Fa,ja.resolve()}function W(a){var b=this,c=b.ready().then(function(){for(var a=b._dbInfo.keyPrefix,c=localStorage.length-1;c>=0;c--){var d=localStorage.key(c);0===d.indexOf(a)&&localStorage.removeItem(d)}});return j(c,a),c}function X(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=c.ready().then(function(){var b=c._dbInfo,d=localStorage.getItem(b.keyPrefix+a);return d&&(d=b.serializer.deserialize(d)),d});return j(d,b),d}function Y(a,b){var c=this,d=c.ready().then(function(){for(var b=c._dbInfo,d=b.keyPrefix,e=d.length,f=localStorage.length,g=1,h=0;h<f;h++){var i=localStorage.key(h);if(0===i.indexOf(d)){var j=localStorage.getItem(i);if(j&&(j=b.serializer.deserialize(j)),j=a(j,i.substring(e),g++),void 0!==j)return j}}});return j(d,b),d}function Z(a,b){var c=this,d=c.ready().then(function(){var b,d=c._dbInfo;try{b=localStorage.key(a)}catch(a){b=null}return b&&(b=b.substring(d.keyPrefix.length)),b});return j(d,b),d}function $(a){var b=this,c=b.ready().then(function(){for(var a=b._dbInfo,c=localStorage.length,d=[],e=0;e<c;e++)0===localStorage.key(e).indexOf(a.keyPrefix)&&d.push(localStorage.key(e).substring(a.keyPrefix.length));return d});return j(c,a),c}function _(a){var b=this,c=b.keys().then(function(a){return a.length});return j(c,a),c}function aa(a,b){var c=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var d=c.ready().then(function(){var b=c._dbInfo;localStorage.removeItem(b.keyPrefix+a)});return j(d,b),d}function ba(a,b,c){var d=this;"string"!=typeof a&&(console.warn(a+" used as a key, but it is not a string."),a=String(a));var e=d.ready().then(function(){void 0===b&&(b=null);var c=b;return new ja(function(e,f){var g=d._dbInfo;g.serializer.serialize(b,function(b,d){if(d)f(d);else try{localStorage.setItem(g.keyPrefix+a,b),e(c)}catch(a){"QuotaExceededError"!==a.name&&"NS_ERROR_DOM_QUOTA_REACHED"!==a.name||f(a),f(a)}})})});return j(e,c),e}function ca(a,b){a[b]=function(){var c=arguments;return a.ready().then(function(){return a[b].apply(a,c)})}}function da(){for(var a=1;a<arguments.length;a++){var b=arguments[a];if(b)for(var c in b)b.hasOwnProperty(c)&&(Oa(b[c])?arguments[0][c]=b[c].slice():arguments[0][c]=b[c])}return arguments[0]}function ea(a){for(var b in Ja)if(Ja.hasOwnProperty(b)&&Ja[b]===a)return!0;return!1}var fa="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(a){return typeof a}:function(a){return a&&"function"==typeof Symbol&&a.constructor===Symbol&&a!==Symbol.prototype?"symbol":typeof a},ga=e();"undefined"==typeof Promise&&a(3);var 
ha,ia,ja=Promise,ka="local-forage-detect-blob-support",la=Object.prototype.toString,ma={_driver:"asyncStorage",_initStorage:y,iterate:A,getItem:z,setItem:B,removeItem:C,clear:D,length:E,key:F,keys:G},na="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",oa="~~local_forage_type~",pa=/^~~local_forage_type~([^~]+)~/,qa="__lfsc__:",ra=qa.length,sa="arbf",ta="blob",ua="si08",va="ui08",wa="uic8",xa="si16",ya="si32",za="ur16",Aa="ui32",Ba="fl32",Ca="fl64",Da=ra+sa.length,Ea=Object.prototype.toString,Fa={serialize:J,deserialize:K,stringToBuffer:H,bufferToString:I},Ga={_driver:"webSQLStorage",_initStorage:L,iterate:N,getItem:M,setItem:P,removeItem:Q,clear:R,length:S,key:T,keys:U},Ha={_driver:"localStorageWrapper",_initStorage:V,iterate:Y,getItem:X,setItem:ba,removeItem:aa,clear:W,length:_,key:Z,keys:$},Ia={},Ja={INDEXEDDB:"asyncStorage",LOCALSTORAGE:"localStorageWrapper",WEBSQL:"webSQLStorage"},Ka=[Ja.INDEXEDDB,Ja.WEBSQL,Ja.LOCALSTORAGE],La=["clear","getItem","iterate","key","keys","length","removeItem","setItem"],Ma={description:"",driver:Ka.slice(),name:"localforage",size:4980736,storeName:"keyvaluepairs",version:1},Na={};Na[Ja.INDEXEDDB]=f(),Na[Ja.WEBSQL]=g(),Na[Ja.LOCALSTORAGE]=h();var Oa=Array.isArray||function(a){return"[object Array]"===Object.prototype.toString.call(a)},Pa=function(){function a(b){d(this,a),this.INDEXEDDB=Ja.INDEXEDDB,this.LOCALSTORAGE=Ja.LOCALSTORAGE,this.WEBSQL=Ja.WEBSQL,this._defaultConfig=da({},Ma),this._config=da({},this._defaultConfig,b),this._driverSet=null,this._initDriver=null,this._ready=!1,this._dbInfo=null,this._wrapLibraryMethodsWithReady(),this.setDriver(this._config.driver).catch(function(){})}return a.prototype.config=function(a){if("object"===("undefined"==typeof a?"undefined":fa(a))){if(this._ready)return new Error("Can't call config() after localforage has been used.");for(var b in a){if("storeName"===b&&(a[b]=a[b].replace(/\W/g,"_")),"version"===b&&"number"!=typeof a[b])return new Error("Database version must be a number.");this._config[b]=a[b]}return!("driver"in a&&a.driver)||this.setDriver(this._config.driver)}return"string"==typeof a?this._config[a]:this._config},a.prototype.defineDriver=function(a,b,c){var d=new ja(function(b,c){try{var d=a._driver,e=new Error("Custom driver not compliant; see https://mozilla.github.io/localForage/#definedriver"),f=new Error("Custom driver name already in use: "+a._driver);if(!a._driver)return void c(e);if(ea(a._driver))return void c(f);for(var g=La.concat("_initStorage"),h=0;h<g.length;h++){var i=g[h];if(!i||!a[i]||"function"!=typeof a[i])return void c(e)}var j=ja.resolve(!0);"_support"in a&&(j=a._support&&"function"==typeof a._support?a._support():ja.resolve(!!a._support)),j.then(function(c){Na[d]=c,Ia[d]=a,b()},c)}catch(a){c(a)}});return k(d,b,c),d},a.prototype.driver=function(){return this._driver||null},a.prototype.getDriver=function(a,b,c){var d=this,e=ja.resolve().then(function(){if(!ea(a)){if(Ia[a])return Ia[a];throw new Error("Driver not found.")}switch(a){case d.INDEXEDDB:return ma;case d.LOCALSTORAGE:return Ha;case d.WEBSQL:return Ga}});return k(e,b,c),e},a.prototype.getSerializer=function(a){var b=ja.resolve(Fa);return k(b,a),b},a.prototype.ready=function(a){var b=this,c=b._driverSet.then(function(){return null===b._ready&&(b._ready=b._initDriver()),b._ready});return k(c,a,a),c},a.prototype.setDriver=function(a,b,c){function d(){g._config.driver=g.driver()}function e(a){return g._extend(a),d(),g._ready=g._initStorage(g._config),g._ready}function f(a){return function(){function 
b(){for(;c<a.length;){var f=a[c];return c++,g._dbInfo=null,g._ready=null,g.getDriver(f).then(e).catch(b)}d();var h=new Error("No available storage method found.");return g._driverSet=ja.reject(h),g._driverSet}var c=0;return b()}}var g=this;Oa(a)||(a=[a]);var h=this._getSupportedDrivers(a),i=null!==this._driverSet?this._driverSet.catch(function(){return ja.resolve()}):ja.resolve();return this._driverSet=i.then(function(){var a=h[0];return g._dbInfo=null,g._ready=null,g.getDriver(a).then(function(a){g._driver=a._driver,d(),g._wrapLibraryMethodsWithReady(),g._initDriver=f(h)})}).catch(function(){d();var a=new Error("No available storage method found.");return g._driverSet=ja.reject(a),g._driverSet}),k(this._driverSet,b,c),this._driverSet},a.prototype.supports=function(a){return!!Na[a]},a.prototype._extend=function(a){da(this,a)},a.prototype._getSupportedDrivers=function(a){for(var b=[],c=0,d=a.length;c<d;c++){var e=a[c];this.supports(e)&&b.push(e)}return b},a.prototype._wrapLibraryMethodsWithReady=function(){for(var a=0;a<La.length;a++)ca(this,La[a])},a.prototype.createInstance=function(b){return new a(b)},a}(),Qa=new Pa;b.exports=Qa},{3:3}]},{},[4])(4)}); | zhiyao-huihuxi-jiuneng-zuomingxiang | /zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1.tar.gz/zhiyao-huihuxi-jiuneng-zuomingxiang-2022.10.9.1/ZhiyaoHuihuxiJiunengZuomingxiang/js/libs/localforage.min.js | localforage.min.js |
try: import httplib
except ImportError:
import http.client as httplib
try: from urllib import urlencode
except ImportError:
    from urllib.parse import urlencode
import time
import hashlib
import json
import top
import itertools
import mimetypes
'''
Define some system-wide constants.
'''
SYSTEM_GENERATE_VERSION = "taobao-sdk-python-20160607"
P_APPKEY = "app_key"
P_API = "method"
P_SESSION = "session"
P_ACCESS_TOKEN = "access_token"
P_VERSION = "v"
P_FORMAT = "format"
P_TIMESTAMP = "timestamp"
P_SIGN = "sign"
P_SIGN_METHOD = "sign_method"
P_PARTNER_ID = "partner_id"
P_CODE = 'code'
P_SUB_CODE = 'sub_code'
P_MSG = 'msg'
P_SUB_MSG = 'sub_msg'
N_REST = '/router/rest'
def sign(secret, parameters):
#===========================================================================
    # '''Signing method.
    # @param secret: the secret key used for signing
    # @param parameters: either a dict or a string
    # '''
#===========================================================================
    # If parameters is dict-like, build the canonical string to sign
if hasattr(parameters, "items"):
        keys = sorted(parameters.keys())
parameters = "%s%s%s" % (secret,
str().join('%s%s' % (key, parameters[key]) for key in keys),
secret)
    if not isinstance(parameters, bytes):
        parameters = parameters.encode('utf-8')
    sign = hashlib.md5(parameters).hexdigest().upper()
return sign
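# Example usage of sign() (a sketch; the secret and the parameter names below
# are made up):
#
#   params = {"method": "taobao.time.get", "v": "2.0", "format": "json"}
#   signature = sign("my-app-secret", params)
#   # 'signature' is an upper-case hexadecimal MD5 digest string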
def mixStr(pstr):
    if isinstance(pstr, str):
        return pstr
    elif isinstance(pstr, bytes):
        # bytes on Python 3 (on Python 2, bytes is str and is handled above)
        return pstr.decode('utf-8')
    else:
        try:
            # unicode on Python 2
            return pstr.encode('utf-8')
        except AttributeError:
            return str(pstr)
class FileItem(object):
def __init__(self,filename=None,content=None):
self.filename = filename
self.content = content
class MultiPartForm(object):
"""Accumulate the data to be used when posting a form."""
def __init__(self):
self.form_fields = []
self.files = []
self.boundary = "PYTHON_SDK_BOUNDARY"
return
def get_content_type(self):
return 'multipart/form-data; boundary=%s' % self.boundary
def add_field(self, name, value):
"""Add a simple field to the form data."""
self.form_fields.append((name, str(value)))
return
def add_file(self, fieldname, filename, fileHandle, mimetype=None):
"""Add a file to be uploaded."""
body = fileHandle.read()
if mimetype is None:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
self.files.append((mixStr(fieldname), mixStr(filename), mixStr(mimetype), mixStr(body)))
return
def __str__(self):
"""Return a string representing the form data, including attached files."""
# Build a list of lists, each containing "lines" of the
# request. Each part is separated by a boundary string.
# Once the list is built, return a string where each
# line is separated by '\r\n'.
parts = []
part_boundary = '--' + self.boundary
# Add the form fields
parts.extend(
[ part_boundary,
'Content-Disposition: form-data; name="%s"' % name,
'Content-Type: text/plain; charset=UTF-8',
'',
value,
]
for name, value in self.form_fields
)
# Add the files to upload
parts.extend(
[ part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % \
(field_name, filename),
'Content-Type: %s' % content_type,
'Content-Transfer-Encoding: binary',
'',
body,
]
for field_name, filename, content_type, body in self.files
)
# Flatten the list and add closing boundary marker,
# then return CR+LF separated data
flattened = list(itertools.chain(*parts))
flattened.append('--' + self.boundary + '--')
flattened.append('')
return '\r\n'.join(flattened)
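# Example usage of MultiPartForm (a sketch; the field and file names are made
# up):
#
#   form = MultiPartForm()
#   form.add_field("item_id", 12345)
#   with open("photo.jpg", "rb") as fp:
#       form.add_file("image", "photo.jpg", fp)
#   body = str(form)
#   content_type = form.get_content_type()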
class TopException(Exception):
#===========================================================================
    # Business-level (API) exception
#===========================================================================
def __init__(self):
self.errorcode = None
self.message = None
self.subcode = None
self.submsg = None
self.application_host = None
self.service_host = None
def __str__(self, *args, **kwargs):
sb = "errorcode=" + mixStr(self.errorcode) +\
" message=" + mixStr(self.message) +\
" subcode=" + mixStr(self.subcode) +\
" submsg=" + mixStr(self.submsg) +\
" application_host=" + mixStr(self.application_host) +\
" service_host=" + mixStr(self.service_host)
return sb
class RequestException(Exception):
#===========================================================================
    # Request/connection exception
#===========================================================================
pass
class RestApi(object):
#===========================================================================
    # Base class for REST API requests
#===========================================================================
def __init__(self, domain='gw.api.taobao.com', port = 80):
#=======================================================================
        # Initialize the base class.
        # @param domain: domain name or IP address to send requests to
        # @param port: port to send requests to
#=======================================================================
self.__domain = domain
self.__port = port
self.__httpmethod = "POST"
if(top.getDefaultAppInfo()):
self.__app_key = top.getDefaultAppInfo().appkey
self.__secret = top.getDefaultAppInfo().secret
def get_request_header(self):
return {
'Content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
"Cache-Control": "no-cache",
"Connection": "Keep-Alive",
}
def set_app_info(self, appinfo):
#=======================================================================
        # Set the app credentials for the request.
        # @param appinfo: import top
        #                 appinfo = top.appinfo(appkey, secret)
#=======================================================================
self.__app_key = appinfo.appkey
self.__secret = appinfo.secret
def getapiname(self):
return ""
def getMultipartParas(self):
        return []
def getTranslateParas(self):
        return {}
def _check_requst(self):
pass
def getResponse(self, authrize=None, timeout=30):
#=======================================================================
        # Send the request and return the parsed response
#=======================================================================
        connection = httplib.HTTPConnection(self.__domain, self.__port, timeout=timeout)
sys_parameters = {
P_FORMAT: 'json',
P_APPKEY: self.__app_key,
P_SIGN_METHOD: "md5",
P_VERSION: '2.0',
            P_TIMESTAMP: str(int(time.time() * 1000)),
P_PARTNER_ID: SYSTEM_GENERATE_VERSION,
P_API: self.getapiname(),
}
if authrize is not None:
sys_parameters[P_SESSION] = authrize
application_parameter = self.getApplicationParameters()
sign_parameter = sys_parameters.copy()
sign_parameter.update(application_parameter)
sys_parameters[P_SIGN] = sign(self.__secret, sign_parameter)
connection.connect()
        header = self.get_request_header()
if(self.getMultipartParas()):
form = MultiPartForm()
for key, value in application_parameter.items():
form.add_field(key, value)
for key in self.getMultipartParas():
fileitem = getattr(self,key)
if(fileitem and isinstance(fileitem,FileItem)):
form.add_file(key,fileitem.filename,fileitem.content)
body = str(form)
header['Content-type'] = form.get_content_type()
else:
            body = urlencode(application_parameter)
        url = N_REST + "?" + urlencode(sys_parameters)
connection.request(self.__httpmethod, url, body=body, headers=header)
        response = connection.getresponse()
        if response.status != 200:
            raise RequestException('invalid http status ' + str(response.status) + ', detail body: ' + mixStr(response.read()))
result = response.read()
jsonobj = json.loads(result)
        if "error_response" in jsonobj:
            error = TopException()
            if P_CODE in jsonobj["error_response"]:
                error.errorcode = jsonobj["error_response"][P_CODE]
            if P_MSG in jsonobj["error_response"]:
                error.message = jsonobj["error_response"][P_MSG]
            if P_SUB_CODE in jsonobj["error_response"]:
                error.subcode = jsonobj["error_response"][P_SUB_CODE]
            if P_SUB_MSG in jsonobj["error_response"]:
                error.submsg = jsonobj["error_response"][P_SUB_MSG]
error.application_host = response.getheader("Application-Host", "")
error.service_host = response.getheader("Location-Host", "")
raise error
return jsonobj
def getApplicationParameters(self):
application_parameter = {}
        for key, value in self.__dict__.items():
            if not key.startswith("__") and key not in self.getMultipartParas() and not key.startswith("_RestApi__") and value is not None:
if(key.startswith("_")):
application_parameter[key[1:]] = value
else:
application_parameter[key] = value
        # Consult the translation dict to work around reserved-word parameter names
translate_parameter = self.getTranslateParas()
        for key in list(application_parameter.keys()):
            if key in translate_parameter:
                application_parameter[translate_parameter[key]] = application_parameter.pop(key)
return application_parameter | zhj-top | /zhj_top-1.0.0.tar.gz/zhj_top-1.0.0/top/api/base.py | base.py |
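# Example of defining and calling a concrete API (a sketch; TimeGetRequest is a
# hypothetical subclass, and the default app info must have been set via top):
#
#   class TimeGetRequest(RestApi):
#       def getapiname(self):
#           return 'taobao.time.get'
#
#   req = TimeGetRequest()
#   resp = req.getResponse()   # a dict parsed from the JSON response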
import peewee as pv
from playhouse import sqlite_ext
from playhouse.shortcuts import model_to_dict
from pathlib import Path
database = sqlite_ext.SqliteDatabase(str(Path(__file__).with_name('dict.db')), pragmas={
# 'query_only': 'ON'
})
class BaseModel(pv.Model):
class Meta:
database = database
class Hanzi(BaseModel):
hanzi = pv.TextField(unique=True)
pinyin = pv.TextField(null=True)
meaning = pv.TextField(null=True)
heisig = pv.IntegerField(null=True)
kanji = pv.TextField(null=True)
junda = pv.IntegerField(null=True, unique=True)
vocabs = sqlite_ext.JSONField(default=list)
sentences = sqlite_ext.JSONField(default=list)
compositions = sqlite_ext.JSONField(default=list)
supercompositions = sqlite_ext.JSONField(default=list)
variants = sqlite_ext.JSONField(default=list)
tags = sqlite_ext.JSONField(default=list)
cache = dict()
def __str__(self):
return '{hanzi} {pinyin} {meaning}'.format(**dict(
hanzi=self.hanzi,
pinyin=('[{}]'.format(self.pinyin) if self.pinyin else ''),
meaning=(self.meaning if self.meaning else '')
))
def to_excel(self):
d = model_to_dict(self)
d.update({
'vocabs': ','.join(d['vocabs']),
'sentences': ','.join(d['sentences']),
'compositions': ','.join(d['compositions']),
'supercompositions': ','.join(d['supercompositions']),
'variants': ','.join(d['variants']),
'tags': ','.join(d['tags']),
})
assert all(isinstance(v, (str, int, float)) for v in d.values() if v)
return d
class Vocab(BaseModel):
simplified = pv.TextField()
traditional = pv.TextField(null=True)
pinyin = pv.TextField(null=True)
english = pv.TextField(null=True)
frequency = pv.FloatField()
# hanzis = sqlite_ext.JSONField(default=list)
sentences = sqlite_ext.JSONField(default=list)
tags = sqlite_ext.JSONField(default=list)
class Meta:
indexes = (
(('simplified', 'traditional', 'pinyin', 'english'), True),
)
def __str__(self):
return '{simplified} {traditional} {pinyin} {english}'.format(**dict(
simplified=self.simplified,
traditional=(self.traditional if self.traditional else ''),
pinyin=('[{}]'.format(self.pinyin) if self.pinyin else ''),
english=(self.english if self.english else '')
))
@classmethod
def match(cls, vocab):
return cls.select().where((cls.simplified == vocab) | (cls.traditional == vocab))
def to_excel(self):
d = model_to_dict(self)
d.update({
'frequency': d['frequency'] * 10 ** 6,
'sentences': ','.join(d['sentences']),
'tags': ','.join(d['tags'])
})
assert all(isinstance(v, (str, int, float)) for v in d.values() if v)
return d
class Sentence(BaseModel):
sentence = pv.TextField()
pinyin = pv.TextField(null=True)
english = pv.TextField(null=True)
order = pv.IntegerField(null=True, unique=True)
# hanzis = sqlite_ext.JSONField(default=list)
vocabs = sqlite_ext.JSONField(default=list)
tags = sqlite_ext.JSONField(default=list)
class Meta:
indexes = (
(('sentence', 'pinyin', 'english'), True),
)
def __str__(self):
return '{sentence} {pinyin} {english}'.format(**dict(
sentence=self.sentence,
pinyin=('[{}]'.format(self.pinyin) if self.pinyin else ''),
english=(self.english if self.english else '')
))
def to_excel(self):
d = model_to_dict(self)
d.update({
'vocabs': ','.join(d['vocabs']),
'tags': ','.join(d['tags'])
})
assert all(isinstance(v, (str, int, float)) for v in d.values() if v)
return d | zhlib-snapshot | /zhlib-snapshot-0.1.2.1.tar.gz/zhlib-snapshot-0.1.2.1/zhlib_snapshot/db.py | db.py |
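# Example usage of the models above (a sketch; assumes the bundled dict.db has
# been populated, and '你' / '你好' are arbitrary lookups):
#
#   h = Hanzi.get_or_none(Hanzi.hanzi == '你')
#   if h is not None:
#       print(h)             # "<hanzi> [pinyin] meaning"
#       print(h.to_excel())  # flat dict suitable for spreadsheet export
#   for v in Vocab.match('你好'):
#       print(v)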
from wordfreq import word_frequency
import math
from . import db
from .util import find_hanzi, find_vocab
class Level:
TIER_MIN = 1
TIER_MAX = 6
FREQ_FACTOR = 10**6
@classmethod
def hanzi_get_level(cls, hanzi):
db_hanzi = db.Hanzi.get_or_none(hanzi=hanzi)
if db_hanzi:
return db_hanzi.junda
def search_text(self, text, format_='excel'):
return {
'hanzi': sorted(self.search_hanzi_iter(text, format_=format_),
key=lambda x: x[0] if x[0] else math.inf),
'vocab': sorted(self.search_vocab_iter(text, format_=format_),
key=lambda x: -x[0])
}
def search_hanzi_iter(self, text, format_=None):
for hanzi in find_hanzi(text):
level = self.hanzi_get_level(hanzi)
if level:
tier = self.normalize(level // 400 + 1)
else:
tier = self.TIER_MAX
db_hanzi = db.Hanzi.get_or_none(hanzi=hanzi)
if db_hanzi:
if format_ == 'excel':
yield level, tier, db_hanzi.to_excel()
                elif format_ == 'ankix':
                    # to_ankix() is not defined on the db models in this
                    # snapshot, so this branch would raise AttributeError.
                    yield level, tier, db_hanzi.to_ankix()
else:
yield level, tier, db_hanzi
else:
yield level, tier, dict(hanzi=hanzi)
def search_vocab_iter(self, text, format_=None):
for vocab in find_vocab(text):
            freq = word_frequency(vocab, 'zh') * self.FREQ_FACTOR  # occurrences per million words
try:
tier = self.normalize(7 - math.ceil(math.log10(freq) * 2))
except ValueError:
tier = self.TIER_MAX
db_vocabs = db.Vocab.match(vocab)
if len(db_vocabs) > 0:
if format_ == 'excel':
yield freq, tier, db_vocabs[0].to_excel()
elif format_ == 'ankix':
yield freq, tier, db_vocabs[0].to_ankix()
else:
yield freq, tier, db_vocabs[0]
else:
yield freq, tier, dict(simplified=vocab)
@classmethod
def normalize(cls, tier):
tier = int(tier)
if tier < cls.TIER_MIN:
return cls.TIER_MIN
elif tier > cls.TIER_MAX:
return cls.TIER_MAX
return tier | zhlib-snapshot | /zhlib-snapshot-0.1.2.1.tar.gz/zhlib-snapshot-0.1.2.1/zhlib_snapshot/level.py | level.py |
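# Example usage of Level (a sketch; assumes the bundled dict.db is present):
#
#   level = Level()
#   result = level.search_text('你好世界', format_='excel')
#   for freq, tier, entry in result['vocab']:
#       print(freq, tier, entry)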
# zhlint
Note: This project is highly focused on Chinese text, so the examples and program output below are in Chinese.
## Introduction
A tool for handling document style:
* Supports checking document style (via the `check` command).
* Supports automatically fixing document style (via the `fix` command).
Note:
* Currently, only Markdown documents are supported for checking and fixing.
## Installation and Usage
### Installing with pip
```
pip install zhlint
```
After a successful installation, the `zhlint` command-line program can be used to process documents.
### Checking document style
The `zhlint check SRC` command checks the input `SRC` and prints the detected style errors to stdout. The `SRC` argument can be:
* A file path.
* `-`, meaning stdin.
Example:
```shell
$ ccat doc.md
只有中文或中英文混排中,一律使用中文全角标点. 英文 **english**与非标点的中文之间需要有一个空格。
支持简单的错误名词检测,如 APP、ios 这类的。
$ zhlint check doc.md
==========================================
E101: 英文与非标点的中文之间需要有一个空格
==========================================
LINE: 1
角标点. 英文 english与非标点的中文之间需
--
........................................
==================================================
E201: 只有中文或中英文混排中,一律使用中文全角标点
==================================================
LINE: 1
中文或中英文混排中,一律使用中文全角标
-
.....................................
LINE: 1
律使用中文全角标点.
-
...................
==================
E301: 常用名词错误
==================
LINE: 3
的错误名词检测,如 APP、ios 这类的。
---
....................................
LINE: 3
名词检测,如 APP、ios 这类的。
---
..............................
```
### Fixing document style
The `zhlint fix SRC [DST]` command tries to fix the style errors found in `SRC`. The `SRC` argument can be a file path or `-`:
* If `DST` is omitted, the fixed text is printed to stdout.
* If `DST` is given, the fixed text is written to `DST`.
Example:
```shell
$ zhlint fix doc.md
只有中文或中英文混排中,一律使用中文全角标点。 英文 **english** 与非标点的中文之间需要有一个空格。
支持简单的错误名词检测,如 App、iOS 这类的。
$ zhlint fix doc.md fixed-doc.md
$ colordiff doc.md fixed-doc.md
1c1
< 只有中文或中英文混排中,一律使用中文全角标点. 英文 **english**与非标点的中文之间需要有一个空格。
---
> 只有中文或中英文混排中,一律使用中文全角标点。 英文 **english** 与非标点的中文之间需要有一个空格。
3c3
< 支持简单的错误名词检测,如 APP、ios 这类的。
---
> 支持简单的错误名词检测,如 App、iOS 这类的。
```
## Supported checks
| Error code | Scope | Description |
| ------ | -------- | ------------------------------------------------------------------------------ |
| E101 | paragraph | A space is required between English text and adjacent non-punctuation Chinese text |
| E102 | paragraph | A space is required between digits and adjacent non-punctuation Chinese text |
| E103 | paragraph | Except for `%`, `℃`, and multiplier units (such as `2x`, `3n`), a space is required between a number and its unit |
| E104 | paragraph | When the text inside parentheses is all digits, use half-width parentheses with one space before the opening parenthesis |
| E201 | sentence | In Chinese or mixed Chinese-English text, always use full-width Chinese punctuation |
| E202 | sentence | In a sentence written entirely in English, use English half-width punctuation |
| E203 | paragraph | Never add spaces between Chinese punctuation and other characters |
| E204 | sentence | Use the Chinese quotation marks `「」` and `『』`, with `「」` as the outer quotes |
| E205 | paragraph | Use the standard ellipsis `……` |
| E206 | paragraph | Use the standard exclamation mark `!` |
| E207 | paragraph | Do not use `~` in articles |
| E301 | paragraph | Common noun misspellings |
For details, see [写作规范和格式规范,DaoCloud 文档](http://docs-static.daocloud.io/write-docs/format) (DaoCloud's writing and formatting guidelines, in Chinese).
Below are simple examples for each error. A *trigger example* violates the rule; a *non-trigger example* conforms to the document style.
### E101
Description: a space is required between English text and adjacent non-punctuation Chinese text.
Trigger examples:
```
中文english
中文 english
中文\tenglish
```
Non-trigger examples:
```
中文 english
```
### E102
Description: a space is required between digits and adjacent non-punctuation Chinese text.
Trigger examples:
```
中文42
中文 42
```
Non-trigger examples:
```
中文 42
```
### E103
Description: except for `%`, `℃`, and multiplier units (such as `2x`, `3n`), a space is required between a number and its unit.
Trigger examples:
```
42μ
42 μ
```
Non-trigger examples:
```
42 μ
42x
42n
42%
42%
42℃
Q3
136-4321-1234
word2vec
```
### E104
Description: when the text inside parentheses is all digits, use half-width parentheses with one space before the opening parenthesis.
Trigger examples:
```
中文(42)
中文(42)
中文(42)
中文(42)
中文 (42)
(42)
```
Non-trigger examples:
```
中文 (42)
(42)
```
### E201
Description: in Chinese or mixed Chinese-English text, always use full-width Chinese punctuation.
Trigger examples:
```
有中文, 错误.
中文'测试'
中文"测试"
LaTeX 公式 $$.
LaTeX 公式,$$
LaTeX 公式 \(\).
LaTeX 公式,\(\)
```
Non-trigger examples:
```
有中文,正确。
有中文,正确......
P.S. 这是一行中文。
LaTeX 公式 $$
LaTeX 公式 \(\)
邮箱:[email protected]
有中文,1.0
有中文,www.google.com
链接地址 http://google.com
```
### E202
Description: in a sentence written entirely in English, use English half-width punctuation.
Trigger examples:
```
pure english,nothing wrong。
```
Non-trigger examples:
```
pure english, nothing wrong.
```
### E203
Description: never add spaces between Chinese punctuation and other characters.
Trigger examples:
```
中文, 测试
中文 。测试
「 中文」
```
Non-trigger examples:
```
中文,测试
中文;测试
「中文」
```
### E204
Description: use the Chinese quotation marks `「」` and `『』` in Chinese copy, with `「」` as the outer quotes.
Trigger examples:
```
中文‘测试’
中文“测试”
```
Non-trigger examples:
```
中文「测试」
```
### E205
Description: use the standard ellipsis `……`.
Trigger examples:
```
中文...
中文.......
中文。。。
```
Non-trigger examples:
```
中文......
```
### E206
Description: use the standard exclamation mark `!`.
Trigger examples:
```
中文!!
中文!!
中文!!
中文??
中文??
中文??
```
Non-trigger examples:
```
中文!
中文!
中文?
中文?
```
### E207
Description: do not use `~` in articles.
Trigger examples:
```
中文~
```
Non-trigger examples:
```
中文
```
### E301
Description: common noun misspellings.
Trigger examples:
```
APP
app
android
ios
IOS
IPHONE
iphone
AppStore
app store
wifi
Wifi
Wi-fi
E-mail
Email
PS
ps
Ps.
```
Non-trigger examples:
```
App
Android
```
| zhlint | /zhlint-0.3.2.tar.gz/zhlint-0.3.2/README.md | README.md |
# zhlite






# Description
zhlite is a lightweight Python client for Zhihu whose features are all implemented with Zhihu's official but undocumented APIs. Because many endpoints require authentication, `zhlite` needs a login to work reliably.
Currently, all information obtained through `zhlite` is read-only and cannot be modified.
# Features
- [x] User login
- [x] Followers and followings of the logged-in user
- [x] Basic info of the logged-in user
- [x] Questions of the logged-in user
- [x] Answers of the logged-in user
- [x] Articles of the logged-in user
- [x] Access questions as the logged-in user
- [x] Access answers as the logged-in user
- [x] Batch-download the images in an answer
- [x] Batch-download the videos in an answer
- [ ] Get the comments of an answer
- [x] Proxy support
# Installation
`pip3 install zhlite`
# Usage
`zhlite` has a few key core classes:
1. Auth (authentication module)
2. User (user module)
3. Question (question module)
4. Answer (answer module)
5. Article (article module)
## Module reference
### Auth
| Attribute | Type | Description |
| :----:| :----: | :----: |
| login() | method | Log in |
| islogin | bool | Whether currently logged in |
| profile | User Object | The logged-in user |
| platform | str | Operating system the client is running on |
### User
| Attribute | Type | Description |
| :----:| :----: | :----: |
| id | str | User-defined ID (URL token) |
| uid | str | User ID |
| name | str | Display name |
| gender | str | Gender; 0: female, 1: male, -1: unknown |
| url | str | User URL |
| employments | dict | Employment info |
| educations | dict | Education info |
| locations | list | Location info |
| avatar | str | Avatar URL |
| headline | str | Personal headline |
| is_vip | bool | Zhihu paid ("Yan Selection") member |
| is_org | bool | Organization account |
| follower_count | int | Number of followers |
| followers | generator | Followers |
| following_count | int | Number of followed users |
| followings | generator | Followed users |
| answer_count | int | Number of answers |
| answers | generator | Answers |
| question_count | int | Number of questions |
| questions | generator | Questions |
| article_count | int | Number of articles |
| articles | generator | Articles |
| voteup_count | int | Number of upvotes received |
| visit_count | int | Number of profile visitors |
### Question
| Attribute | Type | Description |
| :----:| :----: | :----: |
| id | int | Question ID |
| title | str | Question title |
| detail | str | Question detail |
| topics | list | Question topics |
| type | str | Question status |
| created | datetime | Time the question was asked |
| updated | datetime | Last modified time |
| author | User Object | Asker |
| answers | generator | Answers |
### Answer
| Attribute | Type | Description |
| :----:| :----: | :----: |
| id | int | Answer ID |
| type | str | Answer status |
| author | User Object | Answerer |
| excerpt | str | Excerpt |
| content | str | Answer body (with HTML) |
| text | str | Answer body (plain text, without HTML) |
| comment_count | int | Number of comments |
| voteup_count | int | Number of upvotes |
| created | datetime | Time answered |
| updated | datetime | Last modified time |
| question | Question Object | The corresponding question |
| images | generator | Images in the answer |
| videos | generator | Videos in the answer |
### Article
| Attribute | Type | Description |
| :----:| :----: | :----: |
| id | int | Article ID |
| title | str | Article title |
| author | User Object | Publisher |
| topics | list | Topics |
| excerpt | str | Excerpt |
| content | str | Article body (with HTML) |
| text | str | Article body (plain text, without HTML) |
| comment_count | int | Number of comments |
| voteup_count | int | Number of upvotes |
| created | datetime | Time published |
| updated | datetime | Last modified time |
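An `Article` can be used the same way, through the attributes listed above (a sketch: the `Article` import and the ID `'12345'` are placeholders, by analogy with the `Question`/`Answer` examples below):
```python
>>> from zhlite import Article
>>> article = Article('12345')
>>> article.title
>>> article.author.name
>>> article.voteup_count
```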
# Quick usage
## Using a proxy
```python
>>> import zhlite
>>> proxies = {
>>> "http":"http://ip:port",
>>> "https":"https://ip:port"
>>> }
>>> zhlite.set_proxy(proxies)
```
## Authentication (Auth)
The first time an `Auth` object is instantiated, you need to log in with a phone number and password; a `cookies.txt` file is then generated to store the login state, so no further logins are needed. To log in again, call `.login(relogin=True)` to force a re-login and refresh `cookies.txt`.
**Note: logging in with a password many times within a short period will flag the account as abnormal; an abnormal account is forced to change its password and its IP is temporarily locked.**
```python
>>> from zhlite import Auth
>>> auth = Auth()
>>> auth.login(relogin=True)
```
## The logged-in user
After logging in, a `User` object for the logged-in user can be obtained via `.profile`.
```python
>>> from zhlite import Auth
>>> auth = Auth()
>>> auth.profile
<zhlite.zhlite.User object at 0x0000024C6C989630>
```
After a successful login, a `cookies.txt` file is saved in the current directory so that the next login needs no input. To force a re-login or switch the logged-in user, call `.login(relogin=True)` (passing `relogin=True` forces the login).
## User
```python
>>> from zhlite import Auth, User, Question, Answer
>>> auth = Auth()
>>> user = User('zhihuadmin') # the "知乎小管家" (Zhihu admin) account
>>> user
<zhlite.zhlite.User object at 0x00000293F66A81D0>
>>> user.id
'zhihuadmin'
>>> user.name
'知乎小管家'
>>> user.questions
<generator object User.questions at 0x00000293F67620C0>
>>> list(user.questions) # use list() with care: it is slow for users with many questions
[<zhlite.zhlite.Question object at 0x00000293F77164E0>, <zhlite.zhlite.Question object at 0x00000293F77FD1D0>, <zhlite.zhlite.Question object at 0x00000293F76AB048>, <zhlite.zhlite.Question object at 0x00000293F6691C18>, <zhlite.zhlite.Question object at 0x00000293F7582E80>, <zhlite.zhlite.Question object at 0x00000293F66A80B8>, <zhlite.zhlite.Question object at 0x00000293F758E390>, <zhlite.zhlite.Question object at 0x00000293F7716400>, <zhlite.zhlite.Question object at 0x00000293F6691BE0>, <zhlite.zhlite.Question object at 0x00000293F76ECA90>]
```
## Question
```python
>>> from zhlite import Auth
>>> auth = Auth()
>>> question = Question('19550225')
>>> question
<zhlite.zhlite.Question object at 0x00000293F76ECF28>
>>> question.title
'如何使用知乎?'
>>> question.author
<zhlite.zhlite.User object at 0x00000293F76EC8D0>
>>> question.created
'2010-12-20 03:27:20'
>>> question.answers
<generator object Question.answers at 0x00000293F67622A0>
```
## Answer
```python
>>> answer = Answer('95070154')
>>> answer
<zhlite.zhlite.Answer object at 0x00000293F77FD1D0>
>>> answer.excerpt
'本问题隶属于「知乎官方指南」:属于「知乎官方指南」的问答有哪些? -- 在知乎上回答问题有一个基本原则:尽可能提供详细的解 释和说明。 不要灌水——不要把「评论」当作「答案」来发布。 如果你对问题本身或别人的答案有自己的看法,你可以通过「评论」来进行,不要把评论当作答案来发布。那样的话,该回答会被其他用户点击「没有帮助」而折叠起来,反而起不到实际效果,也无助于提供高质量的答案。 --------关于回答--------- …'
>>> answer.comment_count
11
>>> answer.created
'2016-04-13 13:48:52'
>>> answer.question
<zhlite.zhlite.Question object at 0x00000293F76AB048>
```
| zhlite | /zhlite-1.8.3.tar.gz/zhlite-1.8.3/README.md | README.md |
.. Copyright 2017 IBM Corp. All Rights Reserved.
..
.. Licensed under the Apache License, Version 2.0 (the "License");
.. you may not use this file except in compliance with the License.
.. You may obtain a copy of the License at
..
.. http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.
..
zhmc-ansible-modules - Ansible modules for the IBM Z HMC Web Services API
=========================================================================
.. image:: https://img.shields.io/pypi/v/zhmc-ansible-modules.svg
:target: https://pypi.python.org/pypi/zhmc-ansible-modules/
:alt: Version on Pypi
.. image:: https://github.com/zhmcclient/zhmc-ansible-modules/workflows/test/badge.svg?branch=master
:target: https://github.com/zhmcclient/zhmc-ansible-modules/actions?query=workflow%3Atest
:alt: Test status (master)
.. image:: https://readthedocs.org/projects/zhmc-ansible-modules/badge/?version=latest
:target: https://zhmc-ansible-modules.readthedocs.io/en/latest/
:alt: Docs status (latest)
.. image:: https://img.shields.io/coveralls/zhmcclient/zhmc-ansible-modules.svg
:target: https://coveralls.io/r/zhmcclient/zhmc-ansible-modules
:alt: Coverage result
.. contents:: Contents:
:local:
Moving to Ansible Galaxy
========================
Starting with version 0.9.0, the zhmc Ansible modules are no longer distributed
as the
`zhmc-ansible-modules package on Pypi <https://pypi.org/project/zhmc-ansible-modules/>`_,
but as the
`ibm.ibm_zhmc collection on Galaxy <https://galaxy.ansible.com/ibm/ibm_zhmc/>`_.
Overview
========
The zhmc-ansible-modules Python package contains `Ansible`_ modules that can
manage platform resources on `IBM Z`_ and `LinuxONE`_ machines that are in
the Dynamic Partition Manager (DPM) operational mode.
The goal of this package is to be able to utilize the power and ease of use
of Ansible for the management of IBM Z platform resources.
The IBM Z resources that can be managed include Partitions, HBAs, NICs, and
Virtual Functions.
The Ansible modules in the zhmc-ansible-modules package are fully
`idempotent <http://docs.ansible.com/ansible/latest/glossary.html#term-idempotency>`_,
following an important principle for Ansible modules.
The idempotency of a module allows Ansible playbooks to specify the desired end
state for a resource, regardless of what the current state is. For example, an
IBM Z partition can be specified to have ``state=active``, which means that
it must exist and be in the active operational status. Depending on the current
state of the partition, actions will be taken by the module to reach this
desired end state: If the partition does not exist, it will be created and
started. If it exists but is not active, it will be started. If it is already
active, nothing will be done. Other initial states, including transitional
states such as starting or stopping, are also handled.
The idempotency of modules makes Ansible playbooks restartable: If an error
happens and some things have been changed already, the playbook can simply be
re-run and will automatically do the right thing, because the initial state
does not matter for reaching the desired end state.
The Ansible modules in the zhmc-ansible-modules package are written in Python
and interact with the Web Services API of the Hardware Management Console (HMC)
of the machines to be managed, by using the API of the `zhmcclient`_ Python
package.
.. _Ansible: https://www.ansible.com/
.. _IBM Z: http://www.ibm.com/it-infrastructure/z/
.. _LinuxONE: http://www.ibm.com/it-infrastructure/linuxone/
.. _zhmcclient: https://github.com/zhmcclient/python-zhmcclient
Documentation
=============
This version of the project has its documentation on RTD:
http://zhmc-ansible-modules.readthedocs.io/en/stable/
Starting with version 0.9.0, the documentation is available on GitHub Pages:
https://zhmcclient.github.io/zhmc-ansible-modules/
Playbook examples
=================
Here are some examples for using the Ansible modules in this project:
Create a stopped partition
--------------------------
This task ensures that a partition with this name exists, is in the stopped
status and has certain property values.
.. code-block:: yaml
---
- hosts: localhost
tasks:
- name: Ensure a partition exists and is stopped
zhmc_partition:
hmc_host: "10.11.12.13"
hmc_auth: "{{ hmc_auth }}"
cpc_name: P000S67B
name: "my partition 1"
state: stopped
properties:
description: "zhmc Ansible modules: partition 1"
ifl_processors: 2
initial_memory: 1024
maximum_memory: 1024
minimum_ifl_processing_weight: 50
maximum_ifl_processing_weight: 800
initial_ifl_processing_weight: 200
... # all partition properties are supported
Start a partition
-----------------
If this task is run after the previous one shown above, no properties need to
be specified. If it is possible that the partition first needs to be created,
then properties would be specified, as above.
.. code-block:: yaml
---
- hosts: localhost
tasks:
- name: Ensure a partition exists and is active
zhmc_partition:
hmc_host: "10.11.12.13"
hmc_auth: "{{ hmc_auth }}"
cpc_name: P000S67B
name: "my partition 1"
state: active
properties:
... # see above
Delete a partition
------------------
This task ensures that a partition with this name does not exist. If it
currently exists, it is stopped (if needed) and deleted.
.. code-block:: yaml
---
- hosts: localhost
tasks:
- name: Ensure a partition does not exist
zhmc_partition:
hmc_host: "10.11.12.13"
hmc_auth: "{{ hmc_auth }}"
cpc_name: P000S67B
name: "my partition 1"
state: absent
Create an HBA in a partition
----------------------------
.. code-block:: yaml
---
- hosts: localhost
tasks:
- name: Ensure HBA exists in the partition
zhmc_hba:
hmc_host: "10.11.12.13"
hmc_auth: "{{ hmc_auth }}"
cpc_name: P000S67B
partition_name: "my partition 1"
name: "hba 1"
state: present
properties:
adapter_name: "fcp 1"
adapter_port: 0
description: The HBA to our storage
device_number: "023F"
... # all HBA properties are supported
Create a NIC in a partition
---------------------------
.. code-block:: yaml
---
- hosts: localhost
tasks:
- name: Ensure NIC exists in the partition
zhmc_nic:
hmc_host: "10.11.12.13"
hmc_auth: "{{ hmc_auth }}"
cpc_name: P000S67B
partition_name: "my partition 1"
name: "nic 1"
state: present
properties:
adapter_name: "osa 1"
adapter_port: 1
description: The NIC to our data network
device_number: "013F"
... # all NIC properties are supported
Create a Virtual Function in a partition
----------------------------------------
.. code-block:: yaml
---
- hosts: localhost
tasks:
- name: Ensure virtual function for zEDC adapter exists in the partition
zhmc_virtual_function:
hmc_host: "10.11.12.13"
hmc_auth: "{{ hmc_auth }}"
cpc_name: P000S67B
partition_name: "my partition 1"
name: "vf 1"
state: present
properties:
adapter_name: "zedc 1"
description: The virtual function for our accelerator adapter
device_number: "043F"
... # all VF properties are supported
Configure partition for booting from FCP LUN
--------------------------------------------
.. code-block:: yaml
---
- hosts: localhost
tasks:
- name: Configure partition for booting via HBA
zhmc_partition:
hmc_host: "10.11.12.13"
hmc_auth: "{{ hmc_auth }}"
cpc_name: P000S67B
name: "my partition 1"
state: stopped
properties:
boot_device: storage-adapter
boot_storage_hba_name: "hba 1"
boot_logical_unit_number: "0001"
boot_world_wide_port_name: "00cdef01abcdef01"
Configure crypto config of a partition
--------------------------------------
.. code-block:: yaml
---
- hosts: localhost
tasks:
- name: Ensure crypto config for partition
zhmc_partition:
hmc_host: "10.11.12.13"
hmc_auth: "{{ hmc_auth }}"
cpc_name: P000S67B
name: "my partition 1"
state: stopped
properties:
crypto_configuration:
crypto_adapter_names:
- "crypto 1"
crypto_domain_configurations:
- domain_index: 17
access_mode: "control-usage"
- domain_index: 19
access_mode: "control"
Quickstart
==========
For installation instructions, see `Installation of zhmc-ansible-modules package
<http://zhmc-ansible-modules.readthedocs.io/en/stable/intro.html#installation>`_.
After having installed the zhmc-ansible-modules package, you can download and
run the example playbooks in `folder 'playbooks' of the Git repository
<https://github.com/zhmcclient/zhmc-ansible-modules/tree/master/playbooks>`_:
* ``create_partition.yml`` creates a partition with a NIC, HBA and virtual
function to an accelerator adapter.
* ``delete_partition.yml`` deletes a partition.
* ``vars_example.yml`` is an example variable file defining variables such as
CPC name, partition name, etc.
* ``vault_example.yml`` is an example password vault file defining variables
for authenticating with the HMC.
Before you run a playbook, copy ``vars_example.yml`` to ``vars.yml`` and
``vault_example.yml`` to ``vault.yml`` and change the variables in those files
as needed.
Then, run the example playbooks:
.. code-block:: text
$ ansible-playbook create_partition.yml
PLAY [localhost] **********************************************************
TASK [Gathering Facts] ****************************************************
ok: [127.0.0.1]
TASK [Ensure partition exists and is stopped] *****************************
changed: [127.0.0.1]
TASK [Ensure HBA exists in the partition] *********************************
changed: [127.0.0.1]
TASK [Ensure NIC exists in the partition] *********************************
changed: [127.0.0.1]
TASK [Ensure virtual function exists in the partition] ********************
changed: [127.0.0.1]
TASK [Configure partition for booting via HBA] ****************************
changed: [127.0.0.1]
PLAY RECAP ****************************************************************
127.0.0.1 : ok=6 changed=5 unreachable=0 failed=0
$ ansible-playbook delete_partition.yml
PLAY [localhost] **********************************************************
TASK [Gathering Facts] ****************************************************
ok: [127.0.0.1]
TASK [Ensure partition does not exist] ************************************
changed: [127.0.0.1]
PLAY RECAP ****************************************************************
127.0.0.1 : ok=2 changed=1 unreachable=0 failed=0
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/README.rst | README.rst |
.. Copyright 2017 IBM Corp. All Rights Reserved.
..
.. Licensed under the Apache License, Version 2.0 (the "License");
.. you may not use this file except in compliance with the License.
.. You may obtain a copy of the License at
..
.. http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.
..
Contributing
------------
Third party contributions to this project are welcome!
In order to contribute, create a `Git pull request`_, considering this:
.. _Git pull request: https://help.github.com/articles/using-pull-requests/
* Tests are required.
* Each commit should only contain one "logical" change.
* A "logical" change should be put into one commit, and not split over multiple
commits.
* Large new features should be split into stages.
* The commit message should not only summarize what you have done, but explain
why the change is useful.
* The commit message must follow the format explained below.
What comprises a "logical" change is subject to sound judgement. Sometimes, it
makes sense to produce a set of commits for a feature (even if not large).
For example, a first commit may introduce a (presumably) compatible API change
without exploitation of that feature. With only this commit applied, it should
be demonstrable that everything is still working as before. The next commit may
be the exploitation of the feature in other components.
For further discussion of good and bad practices regarding commits, see:
* `OpenStack Git Commit Good Practice`_
* `How to Get Your Change Into the Linux Kernel`_
.. _OpenStack Git Commit Good Practice: https://wiki.openstack.org/wiki/GitCommitMessages
.. _How to Get Your Change Into the Linux Kernel: https://www.kernel.org/doc/Documentation/SubmittingPatches
Format of commit messages
~~~~~~~~~~~~~~~~~~~~~~~~~
A commit message must start with a short summary line, followed by a blank
line.
Optionally, the summary line may start with an identifier that helps
identifying the type of change or the component that is affected, followed by
a colon.
It can include a more detailed description after the summary line. This is
where you explain why the change was done, and summarize what was done.
It must end with the DCO (Developer Certificate of Origin) sign-off line in the
format shown in the example below, using your name and a valid email address of
yours. The DCO sign-off line certifies that you followed the rules stated in
`DCO 1.1`_. In short, you certify that you wrote the patch or otherwise have
the right to pass it on as an open-source patch.
.. _DCO 1.1: https://raw.githubusercontent.com/zhmcclient/zhmc-ansible-modules/master/DCO1.1.txt
We use `GitCop`_ during creation of a pull request to check whether the commit
messages in the pull request comply to this format.
If the commit messages do not comply, GitCop will add a comment to the pull
request with a description of what was wrong.
.. _GitCop: http://gitcop.com/
Example commit message:
.. code-block:: text
cookies: Add support for delivering cookies
Cookies are important for many people. This change adds a pluggable API for
delivering cookies to the user, and provides a default implementation.
Signed-off-by: Random J Developer <[email protected]>
Use ``git commit --amend`` to edit the commit message, if you need to.
Use the ``--signoff`` (``-s``) option of ``git commit`` to append a sign-off
line to the commit message with your name and email as known by Git.
If you like filling out the commit message in an editor instead of using
the ``-m`` option of ``git commit``, you can automate the presence of the
sign-off line by using a commit template file:
* Create a file outside of the repo (say, ``~/.git-signoff.template``)
that contains, for example:
.. code-block:: text
<one-line subject>
<detailed description>
Signed-off-by: Random J Developer <[email protected]>
* Configure Git to use that file as a commit template for your repo:
.. code-block:: text
git config commit.template ~/.git-signoff.template
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/CONTRIBUTING.rst | CONTRIBUTING.rst |
import sys
import os
from collections import namedtuple
from pprint import pprint
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase
from ansible.errors import AnsibleError
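# This script drives the internal Ansible 2.x Python API (DataLoader,
# VariableManager, TaskQueueManager) to run a single in-memory play that
# gathers facts for a Z partition via the zhmc_partition module. Note that
# this internal API is not a stable public interface and differs between
# Ansible versions.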
Options = namedtuple('Options',
['connection', 'module_path', 'forks', 'become',
'become_method', 'become_user', 'check', 'diff'])
class ResultCallback(CallbackBase):
"""
Ansible callbacks that store the results in the object.
"""
def __init__(self):
self.status_ok = []
self.status_failed = []
self.status_unreachable = []
def v2_runner_on_ok(self, result):
"""
Called when a task completes successfully.
"""
host_name = result._host.get_name()
task_name = result._task.get_name()
result = result._result
status = dict(host_name=host_name, task_name=task_name, result=result)
self.status_ok.append(status)
print("Host '%s', Task '%s': Ok" % (host_name, task_name))
if task_name == 'debug':
# TODO: The output of the debug module does not get printed by the
# module itself, so we print it here. Find out why the debug module
# does not print.
print("Debug result:")
pprint(result)
def v2_runner_on_failed(self, result, ignore_errors=False):
"""
Called when a task fails.
"""
host_name = result._host.get_name()
task_name = result._task.get_name()
status = dict(host_name=host_name, task_name=task_name,
result=result._result)
self.status_failed.append(status)
print("Host '%s', Task '%s': Failed" % (host_name, task_name))
def v2_runner_on_unreachable(self, result, ignore_errors=False):
"""
Called when a task fails because the host is unreachable.
"""
host_name = result._host.get_name()
task_name = result._task.get_name()
status = dict(host_name=host_name, task_name=task_name,
result=result._result)
self.status_unreachable.append(status)
print("Host '%s', Task '%s': Host unreachable" %
(host_name, task_name))
def rc_msg(rc):
"""
Return error message for a TaskQueueManager.run() return code.
"""
messages = {
TaskQueueManager.RUN_ERROR: "Play failed",
TaskQueueManager.RUN_FAILED_HOSTS: "Play failed on some hosts",
TaskQueueManager.RUN_UNREACHABLE_HOSTS: "Unreachable hosts",
TaskQueueManager.RUN_FAILED_BREAK_PLAY: "Play failed (breaking)",
}
if rc == TaskQueueManager.RUN_UNKNOWN_ERROR:
return "Unknown error"
msg_strings = []
for mask in messages:
if rc & mask:
msg_strings.append(messages[mask])
return ', '.join(msg_strings)
def main():
my_dir = os.path.dirname(sys.argv[0])
zhmc_module_dir = os.path.join(my_dir, 'zhmc_ansible_modules')
zhmc_playbooks_dir = os.path.join(my_dir, 'playbooks')
inventory_file = '/etc/ansible/hosts'
options = Options(connection='local', module_path=[zhmc_module_dir],
forks=100, become=None, become_method=None,
become_user=None, check=False, diff=False)
passwords = dict(vault_pass=None)
results_callback = ResultCallback()
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=[inventory_file])
variable_manager = VariableManager(loader=loader, inventory=inventory)
# The playbook source
play_source = dict(
name="Get facts for a Z partition",
hosts='localhost',
gather_facts='no',
vars_files=[
os.path.join(zhmc_playbooks_dir, 'vars.yml'),
os.path.join(zhmc_playbooks_dir, 'vault.yml'),
],
tasks=[
dict(
name="Get partition facts",
action=dict(
module='zhmc_partition',
args=dict(
hmc_host="{{hmc_host}}",
hmc_auth="{{hmc_auth}}",
cpc_name="{{cpc_name}}",
name="{{partition_name}}",
state='facts',
),
),
register='part1_result',
),
dict(
action=dict(
module='debug',
args=dict(
msg="Gathered facts for partition "
"'{{part1_result.partition.name}}': "
"status='{{part1_result.partition.status}}'",
),
),
),
],
)
play = Play().load(
play_source,
variable_manager=variable_manager,
loader=loader)
tqm = None
try:
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
stdout_callback=results_callback,
)
try:
rc = tqm.run(play)
except AnsibleError as exc:
print("Error: AnsibleError: %s" % exc)
return 2
if rc == TaskQueueManager.RUN_OK:
return 0
elif rc & TaskQueueManager.RUN_FAILED_HOSTS:
status_list = results_callback.status_failed
assert len(status_list) == 1
status = status_list[0]
host_name = status['host_name']
task_name = status['task_name']
result = status['result']
try:
msg = result['msg']
except Exception:
print("Internal error: Unexpected format of result: %r" %
result)
return 2
print("Error: Task '%s' failed on host '%s': %s" %
(task_name, host_name, msg))
return 1
elif rc & TaskQueueManager.RUN_UNREACHABLE_HOSTS:
status_list = results_callback.status_unreachable
assert len(status_list) == 1
status = status_list[0]
host_name = status['host_name']
task_name = status['task_name']
result = status['result']
try:
msg = result['msg']
except Exception:
print("Internal error: Unexpected format of result: %r" %
result)
return 2
print("Error: Task '%s' failed because host '%s' is unreachable: "
"%s" % (task_name, host_name, msg))
return 1
else:
print("Internal error: Unexpected rc=%s: %s" % (rc, rc_msg(rc)))
return 2
finally:
if tqm is not None:
tqm.cleanup()
return 0
if __name__ == '__main__':
sys.exit(main()) | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/get_facts.py | get_facts.py |
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting a zhmc-ansible-modules project maintainer,
Juergen Leopold [email protected], and/or Andreas Maier [email protected]. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/CODE_OF_CONDUCT.md | CODE_OF_CONDUCT.md |
.. Copyright 2017-2018 IBM Corp. All Rights Reserved.
..
.. Licensed under the Apache License, Version 2.0 (the "License");
.. you may not use this file except in compliance with the License.
.. You may obtain a copy of the License at
..
.. http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.
..
.. _`Change log`:
Change log
----------
Moving to Ansible Galaxy
^^^^^^^^^^^^^^^^^^^^^^^^
Starting with version 0.9.0, the zhmc Ansible modules are no longer distributed
as the
`zhmc-ansible-modules package on Pypi <https://pypi.org/project/zhmc-ansible-modules/>`_,
but as the
`ibm.ibm_zhmc collection on Galaxy <https://galaxy.ansible.com/ibm/ibm_zhmc/>`_.
Version 0.8.4
^^^^^^^^^^^^^
Released: 2020-12-14
**Bug fixes:**
* Fixed the incorrect statement about support for Python 3.4. Ansible never
officially supported Python 3.4.
**Enhancements:**
* Migrated from Travis and Appveyor to GitHub Actions. This required several
changes in package dependencies for development.
* Increased the development status shown for the package on Pypi to
Production/Stable. (Issue #285)
Version 0.8.3
^^^^^^^^^^^^^
Released: 2020-11-11
**Bug fixes:**
* Fixed percent-encoding in the 'zhmc_storage_group' and 'zhmc_storage_volume'
modules.
* Docs: Increased the supported Ansible versions from 2.0 or higher to 2.4 or
  higher. This is what is currently tested. See issue #209.
**Enhancements:**
* In the zhmc_nic module, updated the definition of NIC properties to the z15
machine generation. This makes the 'mac_address' property writeable, and adds
the 'vlan_type', 'function_number' and 'function_range' properties.
* zhmc_cpc: Added an artificial property 'storage-groups' to the output
that shows the storage groups attached to the partition, with only a subset
of their properties.
* zhmc_partition: Added an artificial property 'storage-groups' to the output
that shows the storage groups attached to the partition, with all of their
properties and artificial properties as in the result of zhmc_storage_group.
This is enabled by the new boolean input parameter 'expand_storage_groups'.
* zhmc_partition: Added an artificial property 'crypto-adapters' to the
'crypto-configuration' property, showing the adapter properties of the
crypto adapters attached to the partition, with all of their properties and
artificial properties as in the result of zhmc_adapter. This is enabled by
the new boolean input parameter 'expand_crypto_adapters'.
* zhmc_partition: Added artificial properties to the 'nics' property:
* 'adapter-name': Name of the adapter backing the NIC
* 'adapter-port': Port index on the adapter backing the NIC
* 'adapter-id': Adapter ID (PCHID) of the adapter backing the NIC
* Examples: Added an example playbook 'get_cpc_io.yml' which retrieves
information about a CPC in DPM mode and its I/O configuration and
creates a markdown file showing the result.
* Added support in the zhmc_crypto_attachment module for specifying crypto
adapters by name instead of just their count. (See issue #187)
Version 0.8.2
^^^^^^^^^^^^^
Released: 2020-09-22
**Bug fixes:**
* Fixed ParameterError raised when creating NICs on CNA adapter ports.
Version 0.8.1
^^^^^^^^^^^^^
Released: 2020-09-09
**Bug fixes:**
* Fixed AttributeError when using the zhmc_adapter module to create a
HiperSockets adapter. (see issue #141)
Version 0.8.0
^^^^^^^^^^^^^
Released: 2019-04-02
**Bug fixes:**
* Fixed an issue in the zhmc_crypto_attachment module where the incorrect
crypto adapter was picked, leading to a subsequent crypto conflict
when starting the partition. See issue #112.
**Enhancements:**
* Improved the quality of error messages in the zhmc_crypto_attachment module.
Version 0.7.0
^^^^^^^^^^^^^
Released: 2019-02-20
**Incompatible changes:**
* Temporarily disabled the retrieval of full properties in the result data
of the zhmc_adapter module.
**Bug fixes:**
* Docs: Fixed change log of 0.6.0 (see the 0.6.0 section below).
**Enhancements:**
* Renovated the logging:
- Added support for the log_file parameter to all modules.
- Changed the format of the log lines.
- Set log level also when no log_file is specified, causing
the logs to be propagated to the root logger.
Version 0.6.0
^^^^^^^^^^^^^
Released: 2019-01-07
Note: This change log section was fixed in versions 0.6.1 and 0.7.0.
**Bug fixes:**
* Fixed dependency to zhmcclient package to be >=0.20.0, instead
of using its master branch from the github repo.
* Updated the 'requests' package to 2.20.0 to fix the following vulnerability:
https://nvd.nist.gov/vuln/detail/CVE-2018-18074
* Added support for Python 3.7. This required increasing the minimum version
of Ansible from 2.2.0.0 to 2.4.0.0.
This also removes the dependency on the 'pycrypto' package, which has
vulnerabilities and is no longer maintained since 2013. Ansible uses the
'cryptography' package, instead. See issue #66.
* The `crypto_number` property of Adapter is an integer property, and thus the
Ansible module `zhmc_adapter` needs to change the string passed by Ansible
back to an integer. It did that correctly but only for the `properties`
input parameter, and not for the `match` input parameter. The type conversions
are now applied for all properties of Adapter also for the `match` parameter.
* The dictionary to check input properties for the `zhmc_cpc` module had the
`acceptable_status` property written with a hyphen instead of underscore.
This had the effect that it was rejected as non-writeable when specifying
it as input.
**Enhancements:**
* Added support for managing CPCs by adding a `zhmc_cpc` Ansible module.
The module allows setting writeable properties of a CPC in an idempotent way,
and to gather facts for a CPC (i.e. all of its properties including a few
artificial ones). See issue #82.
* Added support for managing adapters by adding a `zhmc_adapter` Ansible
module. The module allows setting writeable properties of an adapter,
changing the adapter type for FICON Express adapters, and changing the
crypto type for Crypto Express adapters, all in an idempotent way.
  It also allows gathering facts for an adapter (i.e. all of its properties
including a few artificial ones).
See issue #83.
* Added a `zhmc_crypto_attachment` Ansible module, which manages the attachment
of crypto adapters and of crypto domains to partitions in an idempotent way.
This was already supported in a less flexible and non-idempotent way by the
`zhmc_partition` Ansible module.
* Added support for adjusting the value of the `ssc_ipv4_gateway` input property
for the `zhmc_partition` module to `None` if specified as the empty string.
This allows defaulting the value more easily in playbooks.
* Docs: Improved and fixed the documentation how to release a version
and how to start a new version.
Version 0.5.0
^^^^^^^^^^^^^
Released: 2018-10-24
**Incompatible changes:**
* Changed 'make setup' back to 'make develop' for consistency with the other
zhmcclient projects.
**Bug fixes:**
* Several fixes in the make process and package dependencies.
* Synced package dependencies with zhmcclient project.
**Enhancements:**
* Added support for DPM storage groups, attachments and volumes, by adding
new modules 'zhmc_storage_group', 'zhmc_storage_group_attachment', and
'zhmc_storage_volume'. Added several playbooks as examples.
Version 0.4.0
^^^^^^^^^^^^^
Released: 2018-03-15
**Bug fixes:**
* Fixed the bug that a TypeError was raised when setting the 'ssc_dns_servers'
property for a Partition. The property value is a list of strings, and
lists of values were not supported previously. Extended the function test
cases for partitions accordingly. (Issue #34).
* Fixed that the "type" property for Partitions could not be specified.
It is valid for Partition creation, and the only restriction is that
its value cannot be changed once the Partition exists. Along with fixing
the logic for such create-only properties, the same issue was also fixed
for the adapter port related properties of HBAs. (Issue #31).
* Improved the logic for handling create+update properties in case
the resource does not exist, such that they are no longer updated
in addition to being set during creation. The logic still supports
updating as an alternative if the resource does not exist, for
update-only properties (e.g. several properties in Partitions).
(Fixed as part of issue #31).
* Fixed the issue that a partition in "terminated" or "paused" status
could not be made absent (i.e. deleted). Now, the partition is
stopped which should bring it into "stopped" status, and then
deleted. (Issue #29).
**Enhancements:**
* Added get_facts.py script to examine usage of the Ansible 2.0 API.
* Added support for gathering partition and child facts.
The fact support is invoked by specifying state=facts.
The fact support is implemented by returning the partition properties
in the result. The returned partition properties are enriched by adding
properties 'hbas', 'nics', 'virtual-functions' that are a list
of the properties of the respective child elements of that partition.
(Issue #32).
Version 0.3.0
^^^^^^^^^^^^^
Released: 2017-08-16
**Incompatible changes:**
**Deprecations:**
**Bug fixes:**
**Enhancements:**
* Added support for specifying integer-typed and float-typed
properties of Partitions, NICs, HBAs, and VFs also as decimal
strings in the module input.
* Specifying string typed properties of Partitions, NICs, HBAs,
and VFs with Unicode characters no longer performs an unnecessary
property update.
**Dependencies:**
* Increased minimum Ansible release from 2.0.0.1 to 2.2.0.0.
* Upgraded zhmcclient requirement to 0.15.0
Version 0.2.0
^^^^^^^^^^^^^^
Released: 2017-07-20
This is the initial release.
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/changes.rst | changes.rst |
.. Copyright 2017 IBM Corp. All Rights Reserved.
..
.. Licensed under the Apache License, Version 2.0 (the "License");
.. you may not use this file except in compliance with the License.
.. You may obtain a copy of the License at
..
.. http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.
..
zhmc Ansible modules
====================
.. toctree::
:maxdepth: 2
intro
gen/list_of_all_modules
gen/common_return_values
development
changes
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/index.rst | index.rst |
.. Copyright 2017 IBM Corp. All Rights Reserved.
..
.. Licensed under the Apache License, Version 2.0 (the "License");
.. you may not use this file except in compliance with the License.
.. You may obtain a copy of the License at
..
.. http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.
..
.. _`Development`:
Development
===========
This section only needs to be read by developers of the zhmc-ansible-modules
project. People who want to make a fix or develop an extension, and people
who want to test the project, are also considered developers for the purpose of
this section.
.. _`Repository`:
Repository
----------
The repository for the zhmc-ansible-modules project is on GitHub:
https://github.com/zhmcclient/zhmc-ansible-modules
.. _`Setting up the development environment`:
Setting up the development environment
--------------------------------------
The development environment is pretty easy to set up.
Besides having a supported operating system with a supported Python version
(see :ref:`Supported environments`), it is recommended that you set up a
`virtual Python environment`_.
.. _virtual Python environment: http://docs.python-guide.org/en/latest/dev/virtualenvs/
Then, with a virtual Python environment active, clone the Git repo of this
project and prepare the development environment with ``make setup``:
.. code-block:: text
$ git clone [email protected]:zhmcclient/zhmc-ansible-modules.git
$ cd zhmc-ansible-modules
$ make setup
This will install all prerequisites the project needs for its development.
Generally, this project uses Make to do things in the currently active
Python environment. The command ``make help`` (or just ``make``) displays a
list of valid Make targets and a short description of what each target does.
.. _`Building the documentation`:
Building the documentation
--------------------------
The ReadTheDocs (RTD) site is used to publish the documentation for the
zhmc-ansible-modules project at http://zhmc-ansible-modules.readthedocs.io/
This page automatically gets updated whenever the ``master`` branch of the
Git repo for this package changes.
In order to build the documentation locally from the Git work directory, issue:
.. code-block:: text
$ make docs
The top-level document to open with a web browser will be
``build/docs/html/index.html``.
.. _`Testing`:
Testing
-------
To run unit tests in the currently active Python environment, issue one of
these example variants of ``make test``:
.. code-block:: text
$ make test # Run all unit tests
$ TESTCASES=test_resource.py make test # Run only this test source file
$ TESTCASES=TestInit make test # Run only this test class
$ TESTCASES="TestInit or TestSet" make test # py.test -k expressions are possible
To run the unit tests and some more commands that verify the project is in good
shape in all supported Python environments, use Tox:
.. code-block:: text
$ tox # Run all tests on all supported Python versions
$ tox -e py27 # Run all tests on Python 2.7
$ tox -e py27 test_resource.py # Run only this test source file on Python 2.7
$ tox -e py27 TestInit # Run only this test class on Python 2.7
   $ tox -e py27 "TestInit or TestSet"    # py.test -k expressions are possible
The positional arguments of the ``tox`` command are passed to ``py.test`` using
its ``-k`` option. Invoke ``py.test --help`` for details on the expression
syntax of its ``-k`` option.
.. _`Contributing`:
.. include:: CONTRIBUTING.rst
.. _`Releasing a version`:
Releasing a version
-------------------
This section shows the steps for releasing a version to `PyPI
<https://pypi.python.org/>`_.
It covers all variants of versions that can be released:
* Releasing the master branch as a new major or minor version (M+1.0.0 or M.N+1.0)
* Releasing a stable branch as a new update version (M.N.U+1)
This description assumes that you are authorized to push to the upstream repo
at https://github.com/zhmcclient/zhmc-ansible-modules and that the upstream repo
has the remote name ``origin`` in your local clone.
1. Switch to your work directory of your local clone of the
zhmc-ansible-modules Git repo and perform the following steps in that
directory.
2. Set shell variables for the version and branch to be released:
* ``MNU`` - Full version number M.N.U this release should have
* ``MN`` - Major and minor version numbers M.N of that full version
* ``BRANCH`` - Name of the branch to be released
When releasing the master branch (e.g. as version ``0.6.0``):
.. code-block:: text
MNU=0.6.0
MN=0.6
BRANCH=master
When releasing a stable branch (e.g. as version ``0.5.1``):
.. code-block:: text
MNU=0.5.1
MN=0.5
BRANCH=stable_$MN
3. Check out the branch to be released, make sure it is up to date with upstream, and
create a topic branch for the version to be released:
.. code-block:: text
git status # Double check the work directory is clean
git checkout $BRANCH
git pull
git checkout -b release_$MNU
4. Edit the change log:
.. code-block:: text
vi docs/changes.rst
and make the following changes in the section of the version to be released:
* Finalize the version to the version to be released.
* Remove the statement that the version is in development.
   * Change the release date to today's date.
* Make sure that all changes are described.
* Make sure the items shown in the change log are relevant for and understandable
by users.
* In the "Known issues" list item, remove the link to the issue tracker and add
text for any known issues you want users to know about.
* Remove all empty list items in that section.
5. Commit your changes and push them upstream:
.. code-block:: text
git add docs/changes.rst
git commit -sm "Release $MNU"
git push --set-upstream origin release_$MNU
6. On GitHub, create a Pull Request for branch ``release_$MNU``. This will trigger the
CI runs in Travis and Appveyor.
Important: When creating Pull Requests, GitHub by default targets the ``master``
branch. If you are releasing a stable branch, you need to change the target branch
of the Pull Request to ``stable_M.N``.
7. On GitHub, close milestone ``M.N.U``.
8. Perform a complete test:
.. code-block:: text
tox
This should not fail because the same tests have already been run in the
Travis CI. However, run it for additional safety before the release.
* If this test fails, fix any issues until the test succeeds. Commit the
changes and push them upstream:
.. code-block:: text
git add <changed-files>
git commit -sm "<change description with details>"
git push
Wait for the automatic tests to show success for this change.
9. On GitHub, once the checks for this Pull Request succeed:
* Merge the Pull Request (no review is needed).
Because this updates the ``stable_M.N`` branch, it triggers an RTD docs build of
its stable version. However, because the git tag for this version is not assigned
yet, this RTD build will show an incorrect version (a dev version based on the
previous version tag). This will be fixed in a subsequent step.
* Delete the branch of the Pull Request (``release_M.N.U``)
10. Checkout the branch you are releasing, update it from upstream, and delete the local
topic branch you created:
.. code-block:: text
git checkout $BRANCH
git pull
git branch -d release_$MNU
11. Tag the version:
Important: This is the basis on which 'pbr' determines the package version. The tag
string must be exactly the version string ``M.N.U``.
Create a tag for the new version and push the tag addition upstream:
.. code-block:: text
git status # Double check the branch to be released is checked out
git tag $MNU
git push --tags
The pushing of the tag triggers another RTD docs build of its stable version, this time
with the correct version as defined in the tag.
If the previous commands fail because this tag already exists for some reason, delete
the tag locally and remotely:
.. code-block:: text
git tag --delete $MNU
git push --delete origin $MNU
and try again.
12. On RTD, verify that it shows the correct version for its stable version:
    RTD stable version: https://zhmc-ansible-modules.readthedocs.io/en/stable/
If it does not, trigger a build of RTD version "stable" on the RTD project
page:
RTD build page: https://readthedocs.org/projects/zhmc-ansible-modules/builds/
Once that build is complete, verify again.
13. On GitHub, edit the new tag ``M.N.U``, and create a release description on it. This
will cause it to appear in the Release tab.
You can see the tags in GitHub via Code -> Releases -> Tags.
14. Do a fresh install of this version in your active Python environment. This ensures
that 'pbr' determines the correct version. Otherwise, it may determine some development
version.
.. code-block:: text
make clobber install
make help # Double check that it shows version ``M.N.U``
15. Upload the package to PyPI:
.. code-block:: text
make upload
This will show the package version and will ask for confirmation.
**Important:** Double check that the correct package version (``M.N.U``,
without any development suffix) is shown.
**Attention!!** This only works once for each version. You cannot
re-release the same version to PyPI, or otherwise update it.
Verify that the released version arrived on PyPI:
https://pypi.python.org/pypi/zhmc-ansible-modules/
16. If you released the master branch, it needs a new fix stream.
Create a branch for its fix stream and push it upstream:
.. code-block:: text
git status # Double check the branch to be released is checked out
git checkout -b stable_$MN
git push --set-upstream origin stable_$MN
Log on to the
`RTD project zhmc-ansible-modules <https://readthedocs.org/projects/zhmc-ansible-modules/versions>`_
and activate the new version (=branch) ``stable_M.N`` as a version to be
built.
.. _`Starting a new version`:
Starting a new version
----------------------
This section shows the steps for starting development of a new version.
These steps may be performed right after the steps for
:ref:`releasing a version`, or independently.
This section covers all variants of new versions:
* A new major or minor version for new development based upon the master branch.
* A new update (=fix) version based on a stable branch.
This description assumes that you are authorized to push to the upstream repo
at https://github.com/zhmcclient/zhmc-ansible-modules and that the upstream repo
has the remote name ``origin`` in your local clone.
1. Switch to your work directory of your local clone of the zhmc-ansible-modules Git
repo and perform the following steps in that directory.
2. Set shell variables for the version to be started and its base branch:
* ``MNU`` - Full version number M.N.U of the new version to be started
* ``MN`` - Major and minor version numbers M.N of that full version
* ``BRANCH`` - Name of the branch the new version is based upon
When starting a (major or minor) version (e.g. ``0.6.0``) based on the master branch:
.. code-block:: text
MNU=0.6.0
MN=0.6
BRANCH=master
When starting an update (=fix) version (e.g. ``0.5.1``) based on a stable branch:
.. code-block:: text
MNU=0.5.1
MN=0.5
BRANCH=stable_$MN
3. Check out the branch the new version is based on, make sure it is up to
date with upstream, and create a topic branch for the new version:
.. code-block:: text
git status # Double check the work directory is clean
git checkout $BRANCH
git pull
git checkout -b start_$MNU
4. Edit the change log:
.. code-block:: text
vi docs/changes.rst
and insert the following section before the top-most section:
.. code-block:: text
Version 0.6.0
^^^^^^^^^^^^^
Released: not yet
**Incompatible changes:**
**Deprecations:**
**Bug fixes:**
**Enhancements:**
**Known issues:**
* See `list of open issues`_.
.. _`list of open issues`: https://github.com/zhmcclient/zhmc-ansible-modules/issues
5. Commit your changes and push them upstream:
.. code-block:: text
git add docs/changes.rst
git commit -sm "Start $MNU"
git push --set-upstream origin start_$MNU
6. On GitHub, create a Pull Request for branch ``start_M.N.U``.
Important: When creating Pull Requests, GitHub by default targets the ``master``
branch. If you are starting based on a stable branch, you need to change the
target branch of the Pull Request to ``stable_M.N``.
7. On GitHub, create a milestone for the new version ``M.N.U``.
You can create a milestone in GitHub via Issues -> Milestones -> New
Milestone.
8. On GitHub, go through all open issues and pull requests that still have
milestones for previous releases set, and either set them to the new
milestone, or to have no milestone.
9. On GitHub, once the checks for this Pull Request succeed:
* Merge the Pull Request (no review is needed)
* Delete the branch of the Pull Request (``start_M.N.U``)
10. Checkout the branch the new version is based on, update it from upstream, and
delete the local topic branch you created:
.. code-block:: text
git checkout $BRANCH
git pull
git branch -d start_$MNU
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/development.rst | development.rst |
.. Copyright 2017 IBM Corp. All Rights Reserved.
..
.. Licensed under the Apache License, Version 2.0 (the "License");
.. you may not use this file except in compliance with the License.
.. You may obtain a copy of the License at
..
.. http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.
..
.. _`Introduction`:
Introduction
============
Moving to Ansible Galaxy
------------------------
Starting with version 0.9.0, the zhmc Ansible modules are no longer distributed
as the
`zhmc-ansible-modules package on PyPI <https://pypi.org/project/zhmc-ansible-modules/>`_,
but as the
`ibm.ibm_zhmc collection on Galaxy <https://galaxy.ansible.com/ibm/ibm_zhmc/>`_.
.. _`What this package provides`:
What this package provides
--------------------------
The zhmc-ansible-modules Python package contains `Ansible`_ modules that can
manage platform resources on `IBM Z`_ and `LinuxONE`_ machines that are in
the Dynamic Partition Manager (DPM) operational mode.
The goal of this package is to be able to utilize the power and ease of use
of Ansible for the management of IBM Z platform resources.
The IBM Z resources that can be managed include Partitions, HBAs, NICs, and
Virtual Functions.
The Ansible modules in the zhmc-ansible-modules package are fully
`idempotent <http://docs.ansible.com/ansible/latest/glossary.html#term-idempotency>`_,
following an important principle for Ansible modules.
The idempotency of a module allows Ansible playbooks to specify the desired end
state for a resource, regardless of what the current state is. For example, an
IBM Z partition can be specified to have ``state=active``, which means that
it must exist and be in the active operational status. Depending on the current
state of the partition, actions will be taken by the module to reach this
desired end state: If the partition does not exist, it will be created and
started. If it exists but is not active, it will be started. If it is already
active, nothing will be done. Other initial states, including transitional
states such as starting or stopping, are also taken care of.
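For illustration, a minimal playbook task using the ``zhmc_partition`` module
with this desired-state approach might look as follows (the ``my_*`` variables
are placeholders that would be defined elsewhere, e.g. in a variable file):

.. code-block:: yaml

    - name: Ensure partition exists and is active
      zhmc_partition:
        hmc_host: "{{ my_hmc_host }}"
        hmc_auth: "{{ my_hmc_auth }}"
        cpc_name: "{{ my_cpc_name }}"
        name: "{{ my_partition_name }}"
        state: active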
The idempotency of modules makes Ansible playbooks restartable: If an error
happens and some things have been changed already, the playbook can simply be
re-run and will automatically do the right thing, because the initial state
does not matter for reaching the desired end state.
The Ansible modules in the zhmc-ansible-modules package are written in Python
and interact with the Web Services API of the Hardware Management Console (HMC)
of the machines to be managed, by using the API of the `zhmcclient`_ Python
package.
.. _Ansible: https://www.ansible.com/
.. _IBM Z: http://www.ibm.com/it-infrastructure/z/
.. _LinuxONE: http://www.ibm.com/it-infrastructure/linuxone/
.. _zhmcclient: http://python-zhmcclient.readthedocs.io/en/stable/
.. _`Supported environments`:
Supported environments
----------------------
The Ansible modules in the zhmc-ansible-modules package are supported in these
environments:
* Ansible versions: 2.4 or higher
* Python versions: 2.7, 3.5 or higher
* Operating systems running Ansible: Linux, OS X
* Machine generations: z13/z13s/Emperor/Rockhopper or higher
.. _`Installation`:
Installation
------------
The system Ansible is installed on is called the "control system". This is
where Ansible commands (such as ``ansible-playbook``) are invoked.
Ansible is written in Python and always invokes Ansible modules as executables,
even when they are themselves written in Python. Therefore, Ansible modules
implemented in Python are run as Python scripts and are not imported as Python
modules.
The standard installation is that Ansible is installed as an operating system
package and uses the existing system Python (version 2). The Ansible modules
then also use the system Python.
As an alternative to the standard installation, it is possible to use a
`virtual Python environment`_ for Ansible itself and for Ansible modules
written in Python. Using a virtual Python environment has two main advantages:
It minimizes the risk of incompatibilities between Python packages, because the
virtual environment contains only the packages needed for the specific use
case, and it does not pollute your system Python installation with other
Python packages, keeping the risk of incompatibilities away from your system
Python.
.. _`virtual Python environment`: http://docs.python-guide.org/en/latest/dev/virtualenvs/
The following sections describe these two installation methods.
Standard installation with system Python
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
All commands shown are to be executed in a bash shell on the control system.
.. _`Installing the Control Machine`: http://docs.ansible.com/ansible/latest/intro_installation.html#installing-the-control-machine
1. Install Ansible as an operating system package on the control system.
For details, see `Installing the Control Machine`_.
2. Install the zhmc-ansible-modules package into the system Python:
.. code-block:: bash
$ sudo pip install zhmc-ansible-modules
This will also install its dependent Python packages.
3. Set up the Ansible module search path
Find out the install location of the zhmc-ansible-modules package:
.. code-block:: bash
$ pip show zhmc-ansible-modules | grep Location
Location: /usr/local/lib/python2.7/dist-packages
The Ansible module search path is the ``zhmc_ansible_modules`` directory in
the location shown:
.. code-block:: text
/usr/local/lib/python2.7/dist-packages/zhmc_ansible_modules
Note that the Python package name is ``zhmc-ansible-modules`` while the
package directory is ``zhmc_ansible_modules``.
Set the Ansible module search path using one of these options:
a) via the Ansible config file:
Edit the Ansible config file ``/etc/ansible/ansible.cfg`` to contain the
following line:
.. code-block:: text
library = /usr/local/lib/python2.7/dist-packages/zhmc_ansible_modules
b) via an environment variable:
Edit your ``~/.bashrc`` file to contain the following line:
.. code-block:: text
export ANSIBLE_LIBRARY=/usr/local/lib/python2.7/dist-packages/zhmc_ansible_modules
and source the file to set it in your current shell:
.. code-block:: bash
$ . ~/.bashrc
Alternative installation with virtual Python environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _virtualenv: https://virtualenv.pypa.io/
This section describes the installation of Ansible and the Ansible modules in
the zhmc-ansible-modules package into a virtual Python environment that is set
up using `virtualenv`_.
This installation method utilizes the ability of Ansible to configure the
Python environment it uses, and configures it to use the active Python (which
can be a virtual Python environment or the system Python).
All commands shown are to be executed in a bash shell on the control system.
1. Install Ansible as an operating system package on the control system.
For details, see `Installing the Control Machine`_.
2. Create a shell script that invokes the active Python.
   Adjust the file name and path for the shell script in the ``python_script``
   variable as needed; the only requirement is that the shell script must be
   found in the PATH:
.. code-block:: bash
$ python_script=$HOME/local/bin/env_python
$ cat >$python_script <<'EOT'
#!/bin/bash
py=$(which python)
$py "$@"
EOT
$ chmod 755 $python_script
3. Configure Ansible to invoke Python via the new shell script (using the
``python_script`` variable from the previous step):
.. code-block:: bash
$ sudo tee -a /etc/ansible/hosts >/dev/null <<EOT
[local:vars]
ansible_python_interpreter=$python_script
EOT
4. Create a shell script that sets the ``ANSIBLE_LIBRARY`` environment
variable to the location of the zhmc-ansible-modules package found in the
active Python environment.
   Adjust the file name and path for the shell script in the ``library_script``
   variable as needed; the only requirement is that the shell script must be
   found in the PATH:
.. code-block:: bash
$ library_script=$HOME/local/bin/setup_ansible_library
$ cat >$library_script <<'EOT'
#!/bin/bash
zhmc_dir=$(dirname $(python -c "import zhmc_ansible_modules as m; print(m.__file__)"))
export ANSIBLE_LIBRARY=$zhmc_dir
EOT
$ chmod 755 $library_script
5. Create a virtual Python environment for Python 2.7 and activate it.
.. code-block:: bash
$ mkvirtualenv myenv
Note: Using the command shown requires the ``virtualenvwrapper`` package.
6. Install the zhmc-ansible-modules Python package into the active virtual
Python environment:
.. code-block:: bash
(myenv)$ pip install zhmc-ansible-modules
This will also install its dependent Python packages.
7. Set the ``ANSIBLE_LIBRARY`` environment variable by sourcing the script created
in step 4:
.. code-block:: bash
$ . setup_ansible_library
This must be done after each switch (or deactivation) of the active Python
environment and before any Ansible command (that uses these modules) is
invoked.
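   To check that the variable points into the active Python environment, you
   can display it (the path shown is just a hypothetical example):

   .. code-block:: bash

      $ echo $ANSIBLE_LIBRARY
      /home/user/.virtualenvs/myenv/lib/python2.7/site-packages/zhmc_ansible_modules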
Verification of the installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can verify that the zhmc-ansible-modules package and its dependent packages
are installed correctly by running an example playbook in check mode:
.. code-block:: bash
$ ansible-playbook playbooks/create_partition.yml --check
PLAY [localhost] ***********************************************************
TASK [Gathering Facts] *****************************************************
ok: [127.0.0.1]
TASK [Ensure partition exists and is stopped] ******************************
changed: [127.0.0.1]
TASK [Ensure HBA exists in the partition] **********************************
changed: [127.0.0.1]
TASK [Ensure NIC exists in the partition] **********************************
changed: [127.0.0.1]
TASK [Ensure virtual function exists in the partition] *********************
changed: [127.0.0.1]
TASK [Configure partition for booting via HBA] *****************************
changed: [127.0.0.1]
PLAY RECAP *****************************************************************
127.0.0.1 : ok=6 changed=5 unreachable=0 failed=0
.. _`Example playbooks`:
Example playbooks
-----------------
After having installed the zhmc-ansible-modules package, you can download and
run the example playbooks in the `playbooks folder of the Git repository
<https://github.com/zhmcclient/zhmc-ansible-modules/tree/master/playbooks>`_:
* ``create_partition.yml`` creates a partition with a NIC, HBA and virtual
function to an accelerator adapter.
* ``delete_partition.yml`` deletes a partition.
* ``vars_example.yml`` is an example variable file defining variables such as
CPC name, partition name, etc.
* ``vault_example.yml`` is an example password vault file defining variables
for authenticating with the HMC.
Before you run a playbook, copy ``vars_example.yml`` to ``vars.yml`` and
``vault_example.yml`` to ``vault.yml`` and change the variables in those files
as needed.
Then, run the playbooks:
.. code-block:: text
$ ansible-playbook create_partition.yml
PLAY [localhost] **********************************************************
TASK [Gathering Facts] ****************************************************
ok: [127.0.0.1]
TASK [Ensure partition exists and is stopped] *****************************
changed: [127.0.0.1]
TASK [Ensure HBA exists in the partition] *********************************
changed: [127.0.0.1]
TASK [Ensure NIC exists in the partition] *********************************
changed: [127.0.0.1]
TASK [Ensure virtual function exists in the partition] ********************
changed: [127.0.0.1]
TASK [Configure partition for booting via HBA] ****************************
changed: [127.0.0.1]
PLAY RECAP ****************************************************************
127.0.0.1 : ok=6 changed=5 unreachable=0 failed=0
$ ansible-playbook delete_partition.yml
PLAY [localhost] **********************************************************
TASK [Gathering Facts] ****************************************************
ok: [127.0.0.1]
TASK [Ensure partition does not exist] ************************************
changed: [127.0.0.1]
PLAY RECAP ****************************************************************
127.0.0.1 : ok=2 changed=1 unreachable=0 failed=0
.. _`Versioning`:
Versioning
----------
This documentation applies to version |release| of the zhmc-ansible-modules
package. You can also see that version in the top left corner of this page.
The zhmc-ansible-modules package uses the rules of `Semantic Versioning 2.0.0`_
for its version.
.. _Semantic Versioning 2.0.0: http://semver.org/spec/v2.0.0.html
This documentation may have been built from a development level of the
package. You can recognize a development version of this package by the
presence of a ".devD" suffix in the version string.
.. _`Compatibility`:
Compatibility
-------------
For Ansible modules, compatibility is always seen from the perspective of an
Ansible playbook using them. Thus, a backwards compatible new version of the
zhmc-ansible-modules package means that the user can safely upgrade to that new
version without encountering compatibility issues in any Ansible playbooks
using these modules.
This package uses the rules of `Semantic Versioning 2.0.0`_ for compatibility
between package versions, and for :ref:`deprecations <Deprecations>`.
The public interface of this package that is subject to the semantic versioning
rules (and specifically to its compatibility rules) consists of the Ansible
module interfaces described in this documentation.
Violations of these compatibility rules are described in section
:ref:`Change log`.
.. _`Deprecations`:
Deprecations
------------
Deprecated functionality is marked accordingly in this documentation and in the
:ref:`Change log`.
.. _`Reporting issues`:
Reporting issues
----------------
If you encounter any problem with this package, or if you have questions of any
kind related to this package (even when they are not about a problem), please
open an issue in the `zhmc-ansible-modules issue tracker`_.
.. _`zhmc-ansible-modules issue tracker`: https://github.com/zhmcclient/zhmc-ansible-modules/issues
.. _`License`:
License
-------
This package is licensed under the `Apache 2.0 License`_.
.. _Apache 2.0 License: https://raw.githubusercontent.com/zhmcclient/zhmc-ansible-modules/master/LICENSE
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/intro.rst | intro.rst |
.. _zhmc_storage_group:
zhmc_storage_group - Manages DPM storage groups (with "dpm-storage-management" feature)
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.. contents::
:local:
:depth: 2
Synopsis
--------
* Gathers facts about a storage group associated with a CPC, including its storage volumes and virtual storage resources.
* Creates, deletes and updates a storage group associated with a CPC.
Requirements (on host that executes module)
-------------------------------------------
* Network access to HMC
* zhmcclient >=0.20.0
* ansible >=2.2.0.0
Options
-------
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>cpc_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the CPC associated with the target storage group.</div>
</td>
</tr>
<tr>
<td>expand<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td><ul><li>yes</li><li>no</li></ul></td>
<td>
<div>Boolean that controls whether the returned storage group contains additional artificial properties that expand certain URI or name properties to the full set of resource properties (see description of return values of this module).</div>
</td>
</tr>
<tr>
<td>faked_session<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>Real HMC will be used.</td>
<td></td>
<td>
<div>A <code>zhmcclient_mock.FakedSession</code> object that has a mocked HMC set up. If provided, it will be used instead of connecting to a real HMC. This is used for testing purposes only.</div>
</td>
</tr>
<tr>
<td rowspan="2">hmc_auth<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The authentication credentials for the HMC.</div>
</tr>
<tr>
<td colspan="5">
<table border=1 cellpadding=4>
<caption><b>Dictionary object hmc_auth</b></caption>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>userid<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The userid (username) for authenticating with the HMC.</div>
</td>
</tr>
<tr>
<td>password<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The password for authenticating with the HMC.</div>
</td>
</tr>
</table>
</td>
</tr>
</td>
</tr>
<tr>
<td>hmc_host<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The hostname or IP address of the HMC.</div>
</td>
</tr>
<tr>
<td>log_file<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td></td>
<td>
<div>File path of a log file to which the logic flow of this module as well as interactions with the HMC are logged. If null, logging will be propagated to the Python root logger.</div>
</td>
</tr>
<tr>
<td>name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the target storage group.</div>
</td>
</tr>
<tr>
<td>properties<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>No properties.</td>
<td></td>
<td>
<div>Dictionary with desired properties for the storage group. Used for <code>state=present</code>; ignored for <code>state=absent|facts</code>. Dictionary key is the property name with underscores instead of hyphens, and dictionary value is the property value in YAML syntax. Integer properties may also be provided as decimal strings.</div>
<div>The possible input properties in this dictionary are the properties defined as writeable in the data model for Storage Group resources (where the property names contain underscores instead of hyphens), with the following exceptions:</div>
<div>* <code>name</code>: Cannot be specified because the name has already been specified in the <code>name</code> module parameter.</div>
<div>* <code>type</code>: Cannot be changed once the storage group exists.</div>
<div>Properties omitted in this dictionary will remain unchanged when the storage group already exists, and will get the default value defined in the data model for storage groups in the HMC API book when the storage group is being created.</div>
</td>
</tr>
<tr>
<td>state<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td><ul><li>absent</li><li>present</li><li>facts</li></ul></td>
<td>
<div>The desired state for the target storage group:</div>
<div>* <code>absent</code>: Ensures that the storage group does not exist. If the storage group is currently attached to any partitions, the module will fail.</div>
<div>* <code>present</code>: Ensures that the storage group exists and is associated with the specified CPC, and has the specified properties. The attachment state of the storage group to a partition is not changed.</div>
<div>* <code>facts</code>: Does not change anything on the storage group and returns the storage group properties.</div>
</td>
</tr>
</table>
</br>
Examples
--------
::
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about a storage group
zhmc_storage_group:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_storage_group_name }}"
state: facts
expand: true
register: sg1
- name: Ensure the storage group does not exist
zhmc_storage_group:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_storage_group_name }}"
state: absent
- name: Ensure the storage group exists
zhmc_storage_group:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_storage_group_name }}"
state: present
expand: true
properties:
description: "Example storage group 1"
type: fcp
shared: false
connectivity: 4
          max_partitions: 1
register: sg1
Return Values
-------------
Common return values are documented here :doc:`common_return_values`, the following are the fields unique to this module:
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">name</th>
<th class="head">description</th>
<th class="head">returned</th>
<th class="head">type</th>
<th class="head">sample</th>
</tr>
<tr>
<td>storage_group</td>
<td>
<div>For <code>state=absent</code>, an empty dictionary.</div>
<div>For <code>state=present|facts</code>, a dictionary with the resource properties of the target storage group, plus additional artificial properties as described in the following list items. The dictionary keys are the exact property names as described in the data model for the resource, i.e. they contain hyphens (-), not underscores (_). The dictionary values are the property values using the Python representations described in the documentation of the zhmcclient Python package. The additional artificial properties are:</div>
<div>* <code>attached-partition-names</code>: List of partition names to which the storage group is attached.</div>
<div>* <code>cpc-name</code>: Name of the CPC that is associated to this storage group.</div>
<div>* <code>candidate-adapter-ports</code> (only if expand was requested): List of candidate adapter ports of the storage group. Each port is represented as a dictionary of its properties; in addition each port has an artificial property <code>parent-adapter</code> which represents the adapter of the port. Each adapter is represented as a dictionary of its properties.</div>
<div>* <code>storage-volumes</code> (only if expand was requested): List of storage volumes of the storage group. Each storage volume is represented as a dictionary of its properties.</div>
<div>* <code>virtual-storage-resources</code> (only if expand was requested): List of virtual storage resources of the storage group. Each virtual storage resource is represented as a dictionary of its properties.</div>
<div>* <code>attached-partitions</code> (only if expand was requested): List of partitions to which the storage group is attached. Each partition is represented as a dictionary of its properties.</div>
<div>* <code>cpc</code> (only if expand was requested): The CPC that is associated to this storage group. The CPC is represented as a dictionary of its properties.</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{
"name": "sg-1",
"description": "storage group #1",
...
}</code>
</td>
</tr>
</table>
</br>
</br>
Notes
-----
.. note::
- The CPC that is associated with the target storage group must be in the Dynamic Partition Manager (DPM) operational mode and must have the "dpm-storage-management" firmware feature enabled. That feature has been introduced with the z14-ZR1 / Rockhopper II machine generation.
- This module performs actions only against the Z HMC regarding the definition of storage group objects and their attachment to partitions. This module does not perform any actions against storage subsystems or SAN switches.
- Attachment of a storage group to and from partitions is managed by the Ansible module zhmc_storage_group_attachment.
- The Ansible module zhmc_hba is no longer used on CPCs that have the "dpm-storage-management" feature enabled.
Status
~~~~~~
This module is flagged as **preview** which means that it is not guaranteed to have a backwards compatible interface.
Support
~~~~~~~
This module is community maintained without core committer oversight.
For more information on what this means please read `Module Support`_.
For help in developing on modules, should you be so inclined, please read the contribution guidelines in the module's `source repository`_, `Testing Ansible`_ and `Developing Modules`_.
.. _`Module Support`: http://docs.ansible.com/ansible/latest/modules_support.html
.. _`Testing Ansible`: http://docs.ansible.com/ansible/latest/dev_guide/testing.html
.. _`Developing Modules`: http://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html
Shipment
~~~~~~~~
This module is a third-party module and is not shipped with Ansible. See the module's `source repository`_ for details.
.. _`source repository`: https://github.com/zhmcclient/zhmc-ansible-modules
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/zhmc_storage_group_module.rst | zhmc_storage_group_module.rst |
.. _zhmc_virtual_function:
zhmc_virtual_function - Manages virtual functions in existing partitions
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.. contents::
:local:
:depth: 2
Synopsis
--------
* Creates, updates, and deletes virtual functions in existing partitions of a CPC.
* The targeted CPC must be in the Dynamic Partition Manager (DPM) operational mode.
Requirements (on host that executes module)
-------------------------------------------
* Network access to HMC
* zhmcclient >=0.14.0
* ansible >=2.2.0.0
Options
-------
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>cpc_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the CPC with the partition containing the virtual function.</div>
</td>
</tr>
<tr>
<td>faked_session<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>Real HMC will be used.</td>
<td></td>
<td>
<div>A <code>zhmcclient_mock.FakedSession</code> object that has a mocked HMC set up. If provided, it will be used instead of connecting to a real HMC. This is used for testing purposes only.</div>
</td>
</tr>
<tr>
<td rowspan="2">hmc_auth<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The authentication credentials for the HMC.</div>
</tr>
<tr>
<td colspan="5">
<table border=1 cellpadding=4>
<caption><b>Dictionary object hmc_auth</b></caption>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>userid<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The userid (username) for authenticating with the HMC.</div>
</td>
</tr>
<tr>
<td>password<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The password for authenticating with the HMC.</div>
</td>
</tr>
</table>
</td>
</tr>
</td>
</tr>
<tr>
<td>hmc_host<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The hostname or IP address of the HMC.</div>
</td>
</tr>
<tr>
<td>log_file<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td></td>
<td>
<div>File path of a log file to which the logic flow of this module as well as interactions with the HMC are logged. If null, logging will be propagated to the Python root logger.</div>
</td>
</tr>
<tr>
<td>name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the target virtual function that is managed. If the virtual function needs to be created, this value becomes its name.</div>
</td>
</tr>
<tr>
<td>partition_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the partition containing the virtual function.</div>
</td>
</tr>
<tr>
<td>properties<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>No input properties</td>
<td></td>
<td>
<div>Dictionary with input properties for the virtual function, for <code>state=present</code>. Key is the property name with underscores instead of hyphens, and value is the property value in YAML syntax. Integer properties may also be provided as decimal strings. Will be ignored for <code>state=absent</code>.</div>
<div>The possible input properties in this dictionary are the properties defined as writeable in the data model for Virtual Function resources (where the property names contain underscores instead of hyphens), with the following exceptions:</div>
<div>* <code>name</code>: Cannot be specified because the name has already been specified in the <code>name</code> module parameter.</div>
<div>* <code>adapter_uri</code>: Cannot be specified because this information is specified using the artificial property <code>adapter_name</code>.</div>
<div>* <code>adapter_name</code>: The name of the adapter that backs the target virtual function.</div>
<div>Properties omitted in this dictionary will remain unchanged when the virtual function already exists, and will get the default value defined in the data model for virtual functions when the virtual function is being created.</div>
</td>
</tr>
<tr>
<td>state<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td><ul><li>absent</li><li>present</li></ul></td>
<td>
<div>The desired state for the target virtual function:</div>
<div><code>absent</code>: Ensures that the virtual function does not exist in the specified partition.</div>
<div><code>present</code>: Ensures that the virtual function exists in the specified partition and has the specified properties.</div>
</td>
</tr>
</table>
</br>
Examples
--------
::
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Ensure virtual function exists in the partition
      zhmc_virtual_function:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_vfunction_name }}"
state: present
properties:
adapter_name: "ABC-123"
description: "The accelerator adapter"
device_number: "033F"
register: vfunction1
- name: Ensure virtual function does not exist in the partition
      zhmc_virtual_function:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_vfunction_name }}"
state: absent
Return Values
-------------
Common return values are documented here :doc:`common_return_values`, the following are the fields unique to this module:
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">name</th>
<th class="head">description</th>
<th class="head">returned</th>
<th class="head">type</th>
<th class="head">sample</th>
</tr>
<tr>
<td>virtual_function</td>
<td>
<div>For <code>state=absent</code>, an empty dictionary.</div>
<div>For <code>state=present</code>, a dictionary with the resource properties of the virtual function (after changes, if any). The dictionary keys are the exact property names as described in the data model for the resource, i.e. they contain hyphens (-), not underscores (_). The dictionary values are the property values using the Python representations described in the documentation of the zhmcclient Python package.</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{
"name": "vfunction-1",
"description": "virtual function #1",
"adapter-uri': "/api/adapters/...",
...
}</code>
</td>
</tr>
</table>
</br>
</br>
Notes
-----
.. note::
- See also Ansible module zhmc_partition.
Status
~~~~~~
This module is flagged as **preview** which means that it is not guaranteed to have a backwards compatible interface.
Support
~~~~~~~
This module is community maintained without core committer oversight.
For more information on what this means please read `Module Support`_.
For help in developing on modules, should you be so inclined, please read the contribution guidelines in the module's `source repository`_, `Testing Ansible`_ and `Developing Modules`_.
.. _`Module Support`: http://docs.ansible.com/ansible/latest/modules_support.html
.. _`Testing Ansible`: http://docs.ansible.com/ansible/latest/dev_guide/testing.html
.. _`Developing Modules`: http://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html
Shipment
~~~~~~~~
This module is a third-party module and is not shipped with Ansible. See the module's `source repository`_ for details.
.. _`source repository`: https://github.com/zhmcclient/zhmc-ansible-modules
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/zhmc_virtual_function_module.rst | zhmc_virtual_function_module.rst |
Return Values
-------------
.. contents:: Topics
Ansible modules normally return a data structure that can be registered into a variable, or seen directly when output by
the `ansible` program. Each module can optionally document its own unique return values (visible through ansible-doc and https://docs.ansible.com).
This document covers return values common to all modules.
.. note:: Some of these keys might be set by Ansible itself once it processes the module's return information.
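For illustration, a task result can be registered into a variable and its
common return values inspected like this (the module and its parameters are
just placeholders):

.. code-block:: yaml

    - name: Run some task and register its result
      zhmc_partition:
        # ... module parameters ...
      register: result

    - name: Show some of the common return values
      debug:
        msg: "changed={{ result.changed }}"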
.. _common_return_values:
Common
^^^^^^
backup_file
```````````
For those modules that implement `backup=no|yes` when manipulating files, a path to the backup file created.
changed
```````
A boolean indicating if the task had to make changes.
failed
``````
A boolean that indicates whether the task failed.
invocation
``````````
Information on how the module was invoked.
msg
```
A string with a generic message relayed to the user.
rc
``
Some modules execute command line utilities or are geared for executing commands directly (raw, shell, command, etc); this field contains the 'return code' of these utilities.
results
```````
If this key exists, it indicates that a loop was present for the task and that it contains a list of the normal module 'result' per item.
skipped
```````
A boolean that indicates whether the task was skipped.
stderr
``````
Some modules execute command line utilities or are geared for executing commands directly (raw, shell, command, etc), this field contains the error output of these utilities.
stderr_lines
````````````
When stderr is returned, this field is also always provided; it is a list of strings, one item per line of the original output.
stdout
``````
Some modules execute command line utilities or are geared for executing commands directly (raw, shell, command, etc). This field contains the normal output of these utilities.
stdout_lines
````````````
When stdout is returned, Ansible always also provides this field, which is a list of strings, one item per line of the original output.
.. _internal_return_values:
Internal use
^^^^^^^^^^^^
These keys can be added by modules but will be removed from registered variables; they are 'consumed' by Ansible itself.
ansible_facts
`````````````
This key should contain a dictionary which will be appended to the facts assigned to the host. These will be directly accessible and don't require using a registered variable.
exception
`````````
This key can contain traceback information caused by an exception in a module. It will only be displayed on high verbosity (-vvv).
warnings
````````
This key contains a list of strings that will be presented to the user.
deprecations
````````````
This key contains a list of dictionaries that will be presented to the user. Keys of the dictionaries are `msg` and `version`; both values are strings, and the value for the `version` key can be an empty string.
.. seealso::
:doc:`modules`
Learn about available modules
`GitHub Core modules directory <https://github.com/ansible/ansible-modules-core/tree/devel>`_
Browse source of core modules
`GitHub Extras modules directory <https://github.com/ansible/ansible-modules-extras/tree/devel>`_
Browse source of extras modules.
`Mailing List <http://groups.google.com/group/ansible-devel>`_
Development mailing list
`irc.freenode.net <http://irc.freenode.net>`_
#ansible IRC chat channel
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/common_return_values.rst | common_return_values.rst |
.. _zhmc_storage_volume:
zhmc_storage_volume - Manages DPM storage volumes in existing storage groups (with "dpm-storage-management" feature)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.. contents::
:local:
:depth: 2
Synopsis
--------
* Gathers facts about a storage volume in a storage group associated with a CPC.
* Creates, deletes and updates a storage volume in a storage group associated with a CPC.
Requirements (on host that executes module)
-------------------------------------------
* Network access to HMC
* zhmcclient >=0.20.0
* ansible >=2.2.0.0
Options
-------
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>cpc_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the CPC associated with the storage group containing the target storage volume.</div>
</td>
</tr>
<tr>
<td>faked_session<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>Real HMC will be used.</td>
<td></td>
<td>
<div>A <code>zhmcclient_mock.FakedSession</code> object that has a mocked HMC set up. If provided, it will be used instead of connecting to a real HMC. This is used for testing purposes only.</div>
</td>
</tr>
<tr>
<td rowspan="2">hmc_auth<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The authentication credentials for the HMC.</div>
</tr>
<tr>
<td colspan="5">
<table border=1 cellpadding=4>
<caption><b>Dictionary object hmc_auth</b></caption>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>userid<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The userid (username) for authenticating with the HMC.</div>
</td>
</tr>
<tr>
<td>password<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The password for authenticating with the HMC.</div>
</td>
</tr>
</table>
</td>
</tr>
</td>
</tr>
<tr>
<td>hmc_host<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The hostname or IP address of the HMC.</div>
</td>
</tr>
<tr>
<td>log_file<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td></td>
<td>
<div>File path of a log file to which the logic flow of this module as well as interactions with the HMC are logged. If null, logging will be propagated to the Python root logger.</div>
</td>
</tr>
<tr>
<td>name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the target storage volume.</div>
</td>
</tr>
<tr>
<td>properties<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>No properties.</td>
<td></td>
<td>
<div>Dictionary with desired properties for the storage volume. Used for <code>state=present</code>; ignored for <code>state=absent|facts</code>. Dictionary key is the property name with underscores instead of hyphens, and dictionary value is the property value in YAML syntax. Integer properties may also be provided as decimal strings.</div>
<div>The possible input properties in this dictionary are the properties defined as writeable in the data model for Storage Volume resources (where the property names contain underscores instead of hyphens), with the following exceptions:</div>
<div>* <code>name</code>: Cannot be specified because the name has already been specified in the <code>name</code> module parameter.</div>
<div>Properties omitted in this dictionary will remain unchanged when the storage volume already exists, and will get the default value defined in the data model for storage volumes in the HMC API book when the storage volume is being created.</div>
</td>
</tr>
<tr>
<td>state<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td><ul><li>absent</li><li>present</li><li>facts</li></ul></td>
<td>
<div>The desired state for the target storage volume:</div>
<div>* <code>absent</code>: Ensures that the storage volume does not exist in the specified storage group.</div>
<div>* <code>present</code>: Ensures that the storage volume exists in the specified storage group, and has the specified properties.</div>
<div>* <code>facts</code>: Does not change anything on the storage volume and returns the storage volume properties.</div>
</td>
</tr>
<tr>
<td>storage_group_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the storage group containing the target storage volume.</div>
</td>
</tr>
</table>
</br>
Examples
--------
::
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about a storage volume
zhmc_storage_volume:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
name: "{{ my_storage_volume_name }}"
state: facts
register: sv1
- name: Ensure the storage volume does not exist
zhmc_storage_volume:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
name: "{{ my_storage_volume_name }}"
state: absent
- name: Ensure the storage volume exists
zhmc_storage_volume:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
name: "{{ my_storage_volume_name }}"
state: present
properties:
description: "Example storage volume 1"
size: 1
register: sv1
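As a hypothetical follow-up task, the registered result can be inspected in later tasks (the size property from the Storage Volume data model is assumed here)::

    - name: Show the size of the storage volume
      debug:
        msg: "Volume {{ sv1.storage_volume.name }} has size {{ sv1.storage_volume.size }}"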
Return Values
-------------
Common return values are documented in :doc:`common_return_values`; the following are the fields unique to this module:
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">name</th>
<th class="head">description</th>
<th class="head">returned</th>
<th class="head">type</th>
<th class="head">sample</th>
</tr>
<tr>
<td>storage_volume</td>
<td>
<div>For <code>state=absent</code>, an empty dictionary.</div>
<div>For <code>state=present|facts</code>, a dictionary with the resource properties of the storage volume, indicating the state after changes from this module (if any) have been applied. The dictionary keys are the exact property names as described in the data model for the resource, i.e. they contain hyphens (-), not underscores (_). The dictionary values are the property values using the Python representations described in the documentation of the zhmcclient Python package. The additional artificial properties are:</div>
<div>* <code>type</code>: Type of the storage volume ('fc' or 'fcp'), as defined in its storage group.</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{
"name": "sv-1",
"description": "storage volume #1",
...
}</code>
</td>
</tr>
</table>
</br>
</br>
Notes
-----
.. note::
- The CPC that is associated with the storage group must be in the Dynamic Partition Manager (DPM) operational mode and must have the "dpm-storage-management" firmware feature enabled. That feature has been introduced with the z14-ZR1 / Rockhopper II machine generation.
- This module performs actions only against the Z HMC regarding the definition of storage volume objects within storage group objects. This module does not perform any actions against storage subsystems or SAN switches.
- The Ansible module zhmc_hba is no longer used on CPCs that have the "dpm-storage-management" feature enabled.
Status
~~~~~~
This module is flagged as **preview** which means that it is not guaranteed to have a backwards compatible interface.
Support
~~~~~~~
This module is community maintained without core committer oversight.
For more information on what this means please read `Module Support`_.
For help in developing on modules, should you be so inclined, please read the contribution guidelines in the module's `source repository`_, `Testing Ansible`_ and `Developing Modules`_.
.. _`Module Support`: http://docs.ansible.com/ansible/latest/modules_support.html
.. _`Testing Ansible`: http://docs.ansible.com/ansible/latest/dev_guide/testing.html
.. _`Developing Modules`: http://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html
Shipment
~~~~~~~~
This module is a third-party module and is not shipped with Ansible. See the module's `source repository`_ for details.
.. _`source repository`: https://github.com/zhmcclient/zhmc-ansible-modules
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/zhmc_storage_volume_module.rst | zhmc_storage_volume_module.rst |
.. _zhmc_crypto_attachment:
zhmc_crypto_attachment - Manages the attachment of crypto adapters and domains to partitions.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.. contents::
:local:
:depth: 2
Synopsis
--------
* Gathers facts about the attachment of crypto adapters and domains to a partition.
* Attaches a range of crypto domains and a number of crypto adapters to a partition.
* Detaches all crypto domains and all crypto adapters from a partition.
Requirements (on host that executes module)
-------------------------------------------
* Network access to HMC
* zhmcclient >=0.20.0
* ansible >=2.2.0.0
Options
-------
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>access_mode<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>usage</td>
<td><ul><li>usage</li><li>control</li></ul></td>
<td>
<div>Only for <code>state=attached</code>: The access mode in which the crypto domains specified in <code>domain_range</code> need to be attached.</div>
</td>
</tr>
<tr>
<td>adapter_count<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>-1</td>
<td></td>
<td>
<div>Only for <code>state=attached</code>: The number of crypto adapters the partition needs to have attached. The special value -1 means all adapters of the desired crypto type in the CPC.</div>
</td>
</tr>
<tr>
<td>cpc_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the CPC that has the partition and the crypto adapters.</div>
</td>
</tr>
<tr>
<td>crypto_type<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>ep11</td>
<td><ul><li>ep11</li><li>cca</li><li>acc</li></ul></td>
<td>
<div>Only for <code>state=attached</code>: The crypto type of the crypto adapters that will be considered for attaching.</div>
</td>
</tr>
<tr>
<td>domain_range<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>(0, -1)</td>
<td></td>
<td>
<div>Only for <code>state=attached</code>: The domain range the partition needs to have attached, as a tuple of integers (min, max) that specify the inclusive range of domain index numbers. Other domains attached to the partition remain unchanged. The special value -1 for the max item means the maximum supported domain index number.</div>
</td>
</tr>
<tr>
<td>faked_session<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>Real HMC will be used.</td>
<td></td>
<td>
<div>A <code>zhmcclient_mock.FakedSession</code> object that has a mocked HMC set up. If provided, it will be used instead of connecting to a real HMC. This is used for testing purposes only.</div>
</td>
</tr>
<tr>
<td rowspan="2">hmc_auth<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The authentication credentials for the HMC.</div>
</tr>
<tr>
<td colspan="5">
<table border=1 cellpadding=4>
<caption><b>Dictionary object hmc_auth</b></caption>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>userid<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The userid (username) for authenticating with the HMC.</div>
</td>
</tr>
<tr>
<td>password<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The password for authenticating with the HMC.</div>
</td>
</tr>
</table>
</td>
</tr>
</td>
</tr>
<tr>
<td>hmc_host<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The hostname or IP address of the HMC.</div>
</td>
</tr>
<tr>
<td>log_file<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td></td>
<td>
<div>File path of a log file to which the logic flow of this module as well as interactions with the HMC are logged. If null, logging will be propagated to the Python root logger.</div>
</td>
</tr>
<tr>
<td>partition_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the partition to which the crypto domains and crypto adapters are attached.</div>
</td>
</tr>
<tr>
<td>state<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td><ul><li>attached</li><li>detached</li><li>facts</li></ul></td>
<td>
<div>The desired state for the attachment:</div>
<div>* <code>attached</code>: Ensures that the specified number of crypto adapters of the specified crypto type, and the specified range of domain index numbers in the specified access mode are attached to the partition.</div>
<div>* <code>detached</code>: Ensures that no crypto adapter and no crypto domains are attached to the partition.</div>
<div>* <code>facts</code>: Does not change anything on the attachment and returns the crypto configuration of the partition.</div>
</td>
</tr>
</table>
</br>
Examples
--------
::
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about the crypto configuration of a partition
zhmc_crypto_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
state: facts
register: crypto1
- name: Ensure domain 0 on all ep11 adapters is attached in usage mode
zhmc_crypto_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_first_partition_name }}"
state: attached
crypto_type: ep11
adapter_count: -1
domain_range: 0,0
access_mode: usage
- name: Ensure domains 1-max on all ep11 adapters are attached in control mode
zhmc_crypto_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_first_partition_name }}"
state: attached
crypto_type: ep11
adapter_count: -1
domain_range: 1,-1
access_mode: control
- name: Ensure domains 0-max on 1 ep11 adapter are attached in usage mode
zhmc_crypto_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_second_partition_name }}"
state: attached
crypto_type: ep11
adapter_count: 1
domain_range: 0,-1
access_mode: usage
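As a hypothetical follow-up task, the registered facts can be inspected using the key names described in the return values below::

    - name: Show the usage domains attached to the partition
      debug:
        msg: "{{ crypto1.crypto_configuration[my_partition_name].usage_domains }}"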
Return Values
-------------
Common return values are documented in :doc:`common_return_values`; the following are the fields unique to this module:
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">name</th>
<th class="head">description</th>
<th class="head">returned</th>
<th class="head">type</th>
<th class="head">sample</th>
</tr>
<tr>
<td>crypto_configuration</td>
<td>
<div>For <code>state=detached|attached|facts</code>, a dictionary with the crypto configuration of the partition after the changes applied by the module. Key is the partition name, and value is a dictionary with the following keys:</div>
<div>* <code>adapters</code>: attached adapters, as a dict of key: adapter name, value: dict of adapter properties.</div>
<div>* <code>domain_config</code>: attached domains, as a dict of key: domain index, value: access mode ('control' or 'usage').</div>
<div>* <code>usage_domains</code>: domains attached in usage mode, as a list of domain index numbers.</div>
<div>* <code>control_domains</code>: domains attached in control mode, as a list of domain index numbers.</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{
"part-1": {
"adapters": {
"adapter 1": {
"type": "crypto",
...
}
},
"domain_config": {
"0": "usage",
"1": "control",
"2": "control"
},
"usage_domains": [0],
"control_domains": [1, 2]
}
}</code>
</td>
</tr>
<tr>
<td>changes</td>
<td>
<div>For <code>state=detached|attached|facts</code>, a dictionary with the changes performed.</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{
"added-adapters": ["adapter 1", "adapter 2"],
"added-domains": ["0", "1"]
}</code>
</td>
</tr>
</table>
</br>
</br>
Notes
-----
.. note::
- The CPC of the target partition must be in the Dynamic Partition Manager (DPM) operational mode.
Status
~~~~~~
This module is flagged as **preview** which means that it is not guaranteed to have a backwards compatible interface.
Support
~~~~~~~
This module is community maintained without core committer oversight.
For more information on what this means please read `Module Support`_.
For help in developing on modules, should you be so inclined, please read the contribution guidelines in the module's `source repository`_, `Testing Ansible`_ and `Developing Modules`_.
.. _`Module Support`: http://docs.ansible.com/ansible/latest/modules_support.html
.. _`Testing Ansible`: http://docs.ansible.com/ansible/latest/dev_guide/testing.html
.. _`Developing Modules`: http://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html
Shipment
~~~~~~~~
This module is a third-party module and is not shipped with Ansible. See the module's `source repository`_ for details.
.. _`source repository`: https://github.com/zhmcclient/zhmc-ansible-modules
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/zhmc_crypto_attachment_module.rst | zhmc_crypto_attachment_module.rst |
.. _zhmc_hba:
zhmc_hba - Manages HBAs in existing partitions (without "dpm-storage-management" feature)
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.. contents::
:local:
:depth: 2
Synopsis
--------
* Creates, updates, and deletes HBAs in existing partitions of a CPC.
* The targeted CPC must be in the Dynamic Partition Manager (DPM) operational mode.
Requirements (on host that executes module)
-------------------------------------------
* Network access to HMC
* zhmcclient >=0.14.0
* ansible >=2.2.0.0
Options
-------
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>cpc_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the CPC with the partition containing the HBA.</div>
</td>
</tr>
<tr>
<td>faked_session<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>Real HMC will be used.</td>
<td></td>
<td>
<div>A <code>zhmcclient_mock.FakedSession</code> object that has a mocked HMC set up. If provided, it will be used instead of connecting to a real HMC. This is used for testing purposes only.</div>
</td>
</tr>
<tr>
<td rowspan="2">hmc_auth<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The authentication credentials for the HMC.</div>
</tr>
<tr>
<td colspan="5">
<table border=1 cellpadding=4>
<caption><b>Dictionary object hmc_auth</b></caption>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>userid<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The userid (username) for authenticating with the HMC.</div>
</td>
</tr>
<tr>
<td>password<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The password for authenticating with the HMC.</div>
</td>
</tr>
</table>
</td>
</tr>
</td>
</tr>
<tr>
<td>hmc_host<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The hostname or IP address of the HMC.</div>
</td>
</tr>
<tr>
<td>log_file<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td></td>
<td>
<div>File path of a log file to which the logic flow of this module as well as interactions with the HMC are logged. If null, logging will be propagated to the Python root logger.</div>
</td>
</tr>
<tr>
<td>name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the target HBA that is managed. If the HBA needs to be created, this value becomes its name.</div>
</td>
</tr>
<tr>
<td>partition_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the partition containing the HBA.</div>
</td>
</tr>
<tr>
<td>properties<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>No input properties</td>
<td></td>
<td>
<div>Dictionary with input properties for the HBA, for <code>state=present</code>. Key is the property name with underscores instead of hyphens, and value is the property value in YAML syntax. Integer properties may also be provided as decimal strings. Will be ignored for <code>state=absent</code>.</div>
<div>The possible input properties in this dictionary are the properties defined as writeable in the data model for HBA resources (where the property names contain underscores instead of hyphens), with the following exceptions:</div>
<div>* <code>name</code>: Cannot be specified because the name has already been specified in the <code>name</code> module parameter.</div>
<div>* <code>adapter_port_uri</code>: Cannot be specified because this information is specified using the artificial properties <code>adapter_name</code> and <code>adapter_port</code>.</div>
<div>* <code>adapter_name</code>: The name of the adapter that has the port backing the target HBA. Cannot be changed after the HBA exists.</div>
<div>* <code>adapter_port</code>: The port index of the adapter port backing the target HBA. Cannot be changed after the HBA exists.</div>
<div>Properties omitted in this dictionary will remain unchanged when the HBA already exists, and will get the default value defined in the data model for HBAs when the HBA is being created.</div>
</td>
</tr>
<tr>
<td>state<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td><ul><li>absent</li><li>present</li></ul></td>
<td>
<div>The desired state for the target HBA:</div>
<div><code>absent</code>: Ensures that the HBA does not exist in the specified partition.</div>
<div><code>present</code>: Ensures that the HBA exists in the specified partition and has the specified properties.</div>
</td>
</tr>
</table>
</br>
Examples
--------
::
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Ensure HBA exists in the partition
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_hba_name }}"
state: present
properties:
adapter_name: FCP-1
adapter_port: 0
description: "The port to our V7K #1"
device_number: "123F"
register: hba1
- name: Ensure HBA does not exist in the partition
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_hba_name }}"
state: absent
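Because the host WWPN is assigned automatically, a hypothetical follow-up task can read it from the registered result for use in SAN configuration (the wwpn property from the HBA data model is assumed here)::

    - name: Show the host WWPN assigned to the HBA
      debug:
        msg: "HBA {{ hba1.hba.name }} has WWPN {{ hba1.hba.wwpn }}"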
Return Values
-------------
Common return values are documented in :doc:`common_return_values`; the following are the fields unique to this module:
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">name</th>
<th class="head">description</th>
<th class="head">returned</th>
<th class="head">type</th>
<th class="head">sample</th>
</tr>
<tr>
<td>hba</td>
<td>
<div>For <code>state=absent</code>, an empty dictionary.</div>
<div>For <code>state=present</code>, a dictionary with the resource properties of the HBA (after changes, if any). The dictionary keys are the exact property names as described in the data model for the resource, i.e. they contain hyphens (-), not underscores (_). The dictionary values are the property values using the Python representations described in the documentation of the zhmcclient Python package.</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{
"name": "hba-1",
"description": "HBA #1",
"adapter-port-uri": "/api/adapters/.../ports/...",
...
}</code>
</td>
</tr>
</table>
</br>
</br>
Notes
-----
.. note::
- See also Ansible module zhmc_partition.
Status
~~~~~~
This module is flagged as **preview** which means that it is not guaranteed to have a backwards compatible interface.
Support
~~~~~~~
This module is community maintained without core committer oversight.
For more information on what this means please read `Module Support`_.
For help in developing on modules, should you be so inclined, please read the contribution guidelines in the module's `source repository`_, `Testing Ansible`_ and `Developing Modules`_.
.. _`Module Support`: http://docs.ansible.com/ansible/latest/modules_support.html
.. _`Testing Ansible`: http://docs.ansible.com/ansible/latest/dev_guide/testing.html
.. _`Developing Modules`: http://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html
Shipment
~~~~~~~~
This module is a third-party module and is not shipped with Ansible. See the module's `source repository`_ for details.
.. _`source repository`: https://github.com/zhmcclient/zhmc-ansible-modules
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/zhmc_hba_module.rst | zhmc_hba_module.rst |
.. _zhmc_cpc:
zhmc_cpc - Manages a CPC.
+++++++++++++++++++++++++
.. contents::
:local:
:depth: 2
Synopsis
--------
* Gathers facts about the CPC including its child resources.
* Updates the properties of a CPC.
Requirements (on host that executes module)
-------------------------------------------
* Network access to HMC
* zhmcclient >=0.20.0
* ansible >=2.2.0.0
Options
-------
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>faked_session<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>Real HMC will be used.</td>
<td></td>
<td>
<div>A <code>zhmcclient_mock.FakedSession</code> object that has a mocked HMC set up. If provided, it will be used instead of connecting to a real HMC. This is used for testing purposes only.</div>
</td>
</tr>
<tr>
<td rowspan="2">hmc_auth<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The authentication credentials for the HMC.</div>
</tr>
<tr>
<td colspan="5">
<table border=1 cellpadding=4>
<caption><b>Dictionary object hmc_auth</b></caption>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>userid<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The userid (username) for authenticating with the HMC.</div>
</td>
</tr>
<tr>
<td>password<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The password for authenticating with the HMC.</div>
</td>
</tr>
</table>
</td>
</tr>
</td>
</tr>
<tr>
<td>hmc_host<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The hostname or IP address of the HMC.</div>
</td>
</tr>
<tr>
<td>log_file<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td></td>
<td>
<div>File path of a log file to which the logic flow of this module as well as interactions with the HMC are logged. If null, logging will be propagated to the Python root logger.</div>
</td>
</tr>
<tr>
<td>name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the target CPC.</div>
</td>
</tr>
<tr>
<td>properties<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>No property changes.</td>
<td></td>
<td>
<div>Only for <code>state=set</code>: New values for the properties of the CPC. Properties omitted in this dictionary will remain unchanged. This parameter will be ignored for <code>state=facts</code>.</div>
<div>The parameter is a dictionary. The key of each dictionary item is the property name as specified in the data model for CPC resources, with underscores instead of hyphens. The value of each dictionary item is the property value (in YAML syntax). Integer properties may also be provided as decimal strings.</div>
<div>The possible properties in this dictionary are the properties defined as writeable in the data model for CPC resources.</div>
</td>
</tr>
<tr>
<td>state<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td><ul><li>set</li><li>facts</li></ul></td>
<td>
<div>The desired state for the target CPC:</div>
<div>* <code>set</code>: Ensures that the CPC has the specified properties.</div>
<div>* <code>facts</code>: Does not change anything on the CPC and returns the CPC properties including its child resources.</div>
</td>
</tr>
</table>
</br>
Examples
--------
::
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about the CPC
zhmc_cpc:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
name: "{{ my_cpc_name }}"
state: facts
register: cpc1
- name: Ensure the CPC has the desired property values
zhmc_cpc:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
name: "{{ my_cpc_name }}"
state: set
properties:
acceptable_status:
- active
description: "This is CPC {{ my_cpc_name }}"
Return Values
-------------
Common return values are documented in :doc:`common_return_values`; the following are the fields unique to this module:
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">name</th>
<th class="head">description</th>
<th class="head">returned</th>
<th class="head">type</th>
<th class="head">sample</th>
</tr>
<tr>
<td>cpc</td>
<td>
<div>For <code>state=set|facts</code>, a dictionary with the properties of the CPC. The properties contain these additional artificial properties for listing its child resources:</div>
<div>* <code>partitions</code>: The defined partitions of the CPC, as a dict of key: partition name, value: dict of a subset of the partition properties (name, status, object_uri).</div>
<div>* <code>adapters</code>: The adapters of the CPC, as a dict of key: adapter name, value: dict of a subset of the adapter properties (name, status, object_uri).</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{
"name": "CPCA",
"description": "CPC A",
"status": "active",
"acceptable_status": [ "active" ],
...
"partitions": [
{
"name": "part-1",
...
},
...
],
"adapters": [
{
"name": "adapter-1",
...
},
...
],
}</code>
</td>
</tr>
</table>
</br>
</br>
Status
~~~~~~
This module is flagged as **preview** which means that it is not guaranteed to have a backwards compatible interface.
Support
~~~~~~~
This module is community maintained without core committer oversight.
For more information on what this means please read `Module Support`_.
For help in developing on modules, should you be so inclined, please read the contribution guidelines in the module's `source repository`_, `Testing Ansible`_ and `Developing Modules`_.
.. _`Module Support`: http://docs.ansible.com/ansible/latest/modules_support.html
.. _`Testing Ansible`: http://docs.ansible.com/ansible/latest/dev_guide/testing.html
.. _`Developing Modules`: http://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html
Shipment
~~~~~~~~
This module is a third-party module and is not shipped with Ansible. See the module's `source repository`_ for details.
.. _`source repository`: https://github.com/zhmcclient/zhmc-ansible-modules
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/zhmc_cpc_module.rst | zhmc_cpc_module.rst |
.. _zhmc_adapter:
zhmc_adapter - Manages an adapter in a CPC.
+++++++++++++++++++++++++++++++++++++++++++
.. contents::
:local:
:depth: 2
Synopsis
--------
* Gathers facts about the adapter including its ports.
* Updates the properties of an adapter.
Requirements (on host that executes module)
-------------------------------------------
* Network access to HMC
* zhmcclient >=0.20.0
* ansible >=2.2.0.0
Options
-------
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>faked_session<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>Real HMC will be used.</td>
<td></td>
<td>
<div>A <code>zhmcclient_mock.FakedSession</code> object that has a mocked HMC set up. If provided, it will be used instead of connecting to a real HMC. This is used for testing purposes only.</div>
</td>
</tr>
<tr>
<td rowspan="2">hmc_auth<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The authentication credentials for the HMC.</div>
</tr>
<tr>
<td colspan="5">
<table border=1 cellpadding=4>
<caption><b>Dictionary object hmc_auth</b></caption>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>userid<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The userid (username) for authenticating with the HMC.</div>
</td>
</tr>
<tr>
<td>password<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The password for authenticating with the HMC.</div>
</td>
</tr>
</table>
</td>
</tr>
</td>
</tr>
<tr>
<td>hmc_host<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The hostname or IP address of the HMC.</div>
</td>
</tr>
<tr>
<td>log_file<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td></td>
<td>
<div>File path of a log file to which the logic flow of this module as well as interactions with the HMC are logged. If null, logging will be propagated to the Python root logger.</div>
</td>
</tr>
<tr>
<td>match<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>No match properties</td>
<td></td>
<td>
<div>Only for <code>state=set</code>: Match properties for identifying the target adapter in the set of adapters in the CPC, if an adapter with the name specified in the <code>name</code> module parameter does not exist in that set. This parameter will be ignored otherwise.</div>
<div>Use of this parameter allows renaming an adapter: The <code>name</code> module parameter specifies the new name of the target adapter, and the <code>match</code> module parameter identifies the adapter to be renamed. This can be combined with other property updates by using the <code>properties</code> module parameter.</div>
<div>The parameter is a dictionary. The key of each dictionary item is the property name as specified in the data model for adapter resources, with underscores instead of hyphens. The value of each dictionary item is the match value for the property (in YAML syntax). Integer properties may also be provided as decimal strings.</div>
<div>The specified match properties follow the rules of filtering for the zhmcclient library as described in https://python-zhmcclient.readthedocs.io/en/stable/concepts.html#filtering</div>
<div>The possible match properties are all properties in the data model for adapter resources, including <code>name</code>.</div>
</td>
</tr>
<tr>
<td>name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the target adapter. In case of renaming an adapter, this is the new name of the adapter.</div>
</td>
</tr>
<tr>
<td>properties<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>No property changes (other than possibly <code>name</code>).</td>
<td></td>
<td>
<div>Only for <code>state=set|present</code>: New values for the properties of the adapter. Properties omitted in this dictionary will remain unchanged. This parameter will be ignored for other states.</div>
<div>The parameter is a dictionary. The key of each dictionary item is the property name as specified in the data model for adapter resources, with underscores instead of hyphens. The value of each dictionary item is the property value (in YAML syntax). Integer properties may also be provided as decimal strings.</div>
<div>The possible properties in this dictionary are the properties defined as writeable in the data model for adapter resources, with the following exceptions:</div>
<div>* <code>name</code>: Cannot be specified as a property because the name has already been specified in the <code>name</code> module parameter.</div>
<div>* <code>type</code>: The desired adapter type can be specified in order to support adapters that can change their type (e.g. the FICON Express adapter can change its type between 'not-configured', 'fcp' and 'fc').</div>
<div>* <code>crypto_type</code>: The crypto type can be specified in order to support the ability of the Crypto Express adapters to change their crypto type. Valid values are 'ep11', 'cca' and 'acc'. Changing to 'acc' will zeroize the crypto adapter.</div>
</td>
</tr>
<tr>
<td>state<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td><ul><li>set</li><li>present</li><li>absent</li><li>facts</li></ul></td>
<td>
<div>The desired state for the target adapter:</div>
<div>* <code>set</code>: Ensures that an existing adapter has the specified properties.</div>
<div>* <code>present</code>: Ensures that a Hipersockets adapter exists and has the specified properties.</div>
<div>* <code>absent</code>: Ensures that a Hipersockets adapter does not exist.</div>
<div>* <code>facts</code>: Does not change anything on the adapter and returns the adapter properties including its ports.</div>
</td>
</tr>
</table>
</br>
Examples
--------
::
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about an existing adapter
zhmc_adapter:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_adapter_name }}"
state: facts
register: adapter1
- name: Ensure an existing adapter has the desired property values
zhmc_adapter:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_adapter_name }}"
state: set
properties:
description: "This is adapter {{ my_adapter_name }}"
register: adapter1
- name: "Ensure the existing adapter identified by its name or adapter ID has
the desired name and property values"
zhmc_adapter:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_adapter_name }}"
match:
adapter_id: "12C"
state: set
properties:
description: "This is adapter {{ my_adapter_name }}"
register: adapter1
- name: "Ensure a Hipersockets adapter exists and has the desired property
values"
zhmc_adapter:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_adapter_name }}"
state: present
properties:
type: hipersockets
description: "This is Hipersockets adapter {{ my_adapter_name }}"
register: adapter1
- name: "Ensure a Hipersockets adapter does not exist"
zhmc_adapter:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_adapter_name }}"
state: absent
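As a hypothetical follow-up task, the registered facts can simply be displayed for inspection::

    - name: Show the facts gathered about the adapter
      debug:
        var: adapter1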
Return Values
-------------
Common return values are documented in :doc:`common_return_values`; the following are the fields unique to this module:
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">name</th>
<th class="head">description</th>
<th class="head">returned</th>
<th class="head">type</th>
<th class="head">sample</th>
</tr>
<tr>
<td>adapter</td>
<td>
<div>For <code>state=absent</code>, an empty dictionary.</div>
<div>For <code>state=set|present|facts</code>, a dictionary with the properties of the adapter. The properties contain this additional artificial property for listing its child resources:</div>
<div>* <code>ports</code>: The ports of the adapter, as a dict of key: port name, value: dict of a subset of the port properties (name, status, element_uri).</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{
"name": "adapter-1",
"description": "Adapter 1",
"status": "active",
"acceptable_status": [ "active" ],
...
"ports": [
{
"name": "Port 0",
...
},
...
]
}</code>
</td>
</tr>
</table>
</br>
</br>
Status
~~~~~~
This module is flagged as **preview** which means that it is not guaranteed to have a backwards compatible interface.
Support
~~~~~~~
This module is community maintained without core committer oversight.
For more information on what this means please read `Module Support`_.
For help in developing on modules, should you be so inclined, please read the contribution guidelines in the module's `source repository`_, `Testing Ansible`_ and `Developing Modules`_.
.. _`Module Support`: http://docs.ansible.com/ansible/latest/modules_support.html
.. _`Testing Ansible`: http://docs.ansible.com/ansible/latest/dev_guide/testing.html
.. _`Developing Modules`: http://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html
Shipment
~~~~~~~~
This module is a third-party module and is not shipped with Ansible. See the module's `source repository`_ for details.
.. _`source repository`: https://github.com/zhmcclient/zhmc-ansible-modules
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/zhmc_adapter_module.rst | zhmc_adapter_module.rst |
All Modules
```````````
.. toctree:: :maxdepth: 1
zhmc_adapter - Manages an adapter in a CPC. <zhmc_adapter_module>
zhmc_cpc - Manages a CPC. <zhmc_cpc_module>
zhmc_crypto_attachment - Manages the attachment of crypto adapters and domains to partitions. <zhmc_crypto_attachment_module>
zhmc_hba - Manages HBAs in existing partitions (without "dpm-storage-management" feature) <zhmc_hba_module>
zhmc_nic - Manages NICs in existing partitions <zhmc_nic_module>
zhmc_partition - Manages partitions <zhmc_partition_module>
zhmc_storage_group - Manages DPM storage groups (with "dpm-storage-management" feature) <zhmc_storage_group_module>
zhmc_storage_group_attachment - Manages the attachment of DPM storage groups to partitions (with "dpm-storage-management" feature) <zhmc_storage_group_attachment_module>
zhmc_storage_volume - Manages DPM storage volumes in existing storage groups (with "dpm-storage-management" feature) <zhmc_storage_volume_module>
zhmc_virtual_function - Manages virtual functions in existing partitions <zhmc_virtual_function_module>
.. note::
- (D): This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged.
The module documentation details page may explain more about this rationale.
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/list_of_all_modules.rst | list_of_all_modules.rst |
.. _zhmc_partition:
zhmc_partition - Manages partitions
+++++++++++++++++++++++++++++++++++
.. contents::
:local:
:depth: 2
Synopsis
--------
* Gathers facts about a partition, including its child resources (HBAs, NICs and virtual functions).
* Creates, updates, deletes, starts, and stops partitions in a CPC. The child resources of the partition are managed by separate Ansible modules.
* The targeted CPC must be in the Dynamic Partition Manager (DPM) operational mode.
Requirements (on host that executes module)
-------------------------------------------
* Network access to HMC
* zhmcclient >=0.14.0
* ansible >=2.2.0.0
Options
-------
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>cpc_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the CPC with the target partition.</div>
</td>
</tr>
<tr>
<td>faked_session<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>Real HMC will be used.</td>
<td></td>
<td>
<div>A <code>zhmcclient_mock.FakedSession</code> object that has a mocked HMC set up. If provided, it will be used instead of connecting to a real HMC. This is used for testing purposes only.</div>
</td>
</tr>
<tr>
<td rowspan="2">hmc_auth<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The authentication credentials for the HMC.</div>
</tr>
<tr>
<td colspan="5">
<table border=1 cellpadding=4>
<caption><b>Dictionary object hmc_auth</b></caption>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>userid<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The userid (username) for authenticating with the HMC.</div>
</td>
</tr>
<tr>
<td>password<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The password for authenticating with the HMC.</div>
</td>
</tr>
</table>
</td>
</tr>
</td>
</tr>
<tr>
<td>hmc_host<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The hostname or IP address of the HMC.</div>
</td>
</tr>
<tr>
<td>log_file<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td></td>
<td>
<div>File path of a log file to which the logic flow of this module as well as interactions with the HMC are logged. If null, logging will be propagated to the Python root logger.</div>
</td>
</tr>
<tr>
<td>name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the target partition.</div>
</td>
</tr>
<tr>
<td>properties<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>No input properties</td>
<td></td>
<td>
<div>Dictionary with input properties for the partition, for <code>state=stopped</code> and <code>state=active</code>. Key is the property name with underscores instead of hyphens, and value is the property value in YAML syntax. Integer properties may also be provided as decimal strings. Will be ignored for <code>state=absent</code>.</div>
<div>The possible input properties in this dictionary are the properties defined as writeable in the data model for Partition resources (where the property names contain underscores instead of hyphens), with the following exceptions:</div>
<div>* <code>name</code>: Cannot be specified because the name has already been specified in the <code>name</code> module parameter.</div>
<div>* <code>type</code>: Cannot be changed once the partition exists, because updating it is not supported.</div>
<div>* <code>boot_storage_device</code>: Cannot be specified because this information is specified using the artificial property <code>boot_storage_hba_name</code>.</div>
<div>* <code>boot_network_device</code>: Cannot be specified because this information is specified using the artificial property <code>boot_network_nic_name</code>.</div>
<div>* <code>boot_storage_hba_name</code>: The name of the HBA whose URI is used to construct <code>boot_storage_device</code>. Specifying it requires that the partition exists.</div>
<div>* <code>boot_network_nic_name</code>: The name of the NIC whose URI is used to construct <code>boot_network_device</code>. Specifying it requires that the partition exists.</div>
<div>* <code>crypto_configuration</code>: The crypto configuration for the partition, in the format of the <code>crypto-configuration</code> property of the partition (see HMC API book for details), with the exception that adapters are specified with their names in field <code>crypto_adapter_names</code> instead of their URIs in field <code>crypto_adapter_uris</code>. If the <code>crypto_adapter_names</code> field is null, all crypto adapters of the CPC will be used.</div>
<div>Properties omitted in this dictionary will remain unchanged when the partition already exists, and will get the default value defined in the data model for partitions in the HMC API book when the partition is being created.</div>
</td>
</tr>
<tr>
<td>state<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td><ul><li>absent</li><li>stopped</li><li>active</li><li>facts</li></ul></td>
<td>
<div>The desired state for the target partition:</div>
<div><code>absent</code>: Ensures that the partition does not exist in the specified CPC.</div>
<div><code>stopped</code>: Ensures that the partition exists in the specified CPC, has the specified properties, and is in the 'stopped' status.</div>
<div><code>active</code>: Ensures that the partition exists in the specified CPC, has the specified properties, and is in the 'active' or 'degraded' status.</div>
<div><code>facts</code>: Does not change anything on the partition and returns the partition properties and the properties of its child resources (HBAs, NICs, and virtual functions).</div>
</td>
</tr>
</table>
</br>
Examples
--------
::
---
# Note: The following examples assume that some variables named 'my_*' are set.
# Because configuring LUN masking in the SAN requires the host WWPN, and the
# host WWPN is automatically assigned and will be known only after an HBA has
# been added to the partition, the partition needs to be created in stopped
# state. Also, because the HBA has not yet been created, the boot
# configuration cannot be done yet:
- name: Ensure the partition exists and is stopped
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_partition_name }}"
state: stopped
properties:
description: "zhmc Ansible modules: Example partition 1"
ifl_processors: 2
initial_memory: 1024
maximum_memory: 1024
register: part1
# After an HBA has been added (see Ansible module zhmc_hba), and LUN masking
# has been configured in the SAN, and a bootable image is available at the
# configured LUN and target WWPN, the partition can be configured for boot
# from the FCP LUN and can be started:
- name: Configure boot device and start the partition
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_partition_name }}"
state: active
properties:
boot_device: storage-adapter
boot_storage_hba_name: hba1
boot_logical_unit_number: 00000000001
boot_world_wide_port_name: abcdefabcdef
register: part1
- name: Ensure the partition does not exist
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_partition_name }}"
state: absent
- name: Define crypto configuration
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_partition_name }}"
state: active
properties:
crypto_configuration:
crypto_adapter_names:
- adapter1
- adapter2
crypto_domain_configurations:
- domain_index: 0
access_mode: control-usage
- domain_index: 1
access_mode: control
register: part1
- name: Gather facts about a partition
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_partition_name }}"
state: facts
register: part1
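As a hypothetical follow-up task, the registered facts can be inspected in later tasks::

    - name: Show the status of the partition
      debug:
        msg: "Partition {{ part1.partition.name }} has status {{ part1.partition.status }}"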
Return Values
-------------
Common return values are documented in :doc:`common_return_values`; the following are the fields unique to this module:
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">name</th>
<th class="head">description</th>
<th class="head">returned</th>
<th class="head">type</th>
<th class="head">sample</th>
</tr>
<tr>
<td>partition</td>
<td>
<div>For <code>state=absent</code>, an empty dictionary.</div>
<div>For <code>state=stopped</code> and <code>state=active</code>, a dictionary with the resource properties of the partition (after changes, if any). The dictionary keys are the exact property names as described in the data model for the resource, i.e. they contain hyphens (-), not underscores (_). The dictionary values are the property values using the Python representations described in the documentation of the zhmcclient Python package.</div>
<div>For <code>state=facts</code>, a dictionary with the resource properties of the partition, including its child resources (HBAs, NICs, and virtual functions). The dictionary keys are the exact property names as described in the data model for the resource, i.e. they contain hyphens (-), not underscores (_). The dictionary values are the property values using the Python representations described in the documentation of the zhmcclient Python package. The properties of the child resources are represented in partition properties named 'hbas', 'nics', and 'virtual-functions', respectively.</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{
"name": "part-1",
"description": "partition #1",
"status": "active",
"boot-device": "storage-adapter",
...
}</code>
</td>
</tr>
</table>
</br>
</br>
Notes
-----
.. note::
- See also Ansible modules zhmc_hba, zhmc_nic, zhmc_virtual_function.
Status
~~~~~~
This module is flagged as **preview** which means that it is not guaranteed to have a backwards compatible interface.
Support
~~~~~~~
This module is community maintained without core committer oversight.
For more information on what this means please read `Module Support`_.
For help in developing on modules, should you be so inclined, please read the contribution guidelines in the module's `source repository`_, `Testing Ansible`_ and `Developing Modules`_.
.. _`Module Support`: http://docs.ansible.com/ansible/latest/modules_support.html
.. _`Testing Ansible`: http://docs.ansible.com/ansible/latest/dev_guide/testing.html
.. _`Developing Modules`: http://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html
Shipment
~~~~~~~~
This module is a third-party module and is not shipped with Ansible. See the module's `source repository`_ for details.
.. _`source repository`: https://github.com/zhmcclient/zhmc-ansible-modules
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/zhmc_partition_module.rst | zhmc_partition_module.rst |
.. _zhmc_storage_group_attachment:
zhmc_storage_group_attachment - Manages the attachment of DPM storage groups to partitions (with "dpm-storage-management" feature)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
.. contents::
:local:
:depth: 2
Synopsis
--------
* Gathers facts about the attachment of a storage group to a partition.
* Attaches and detaches a storage group to and from a partition.
Requirements (on host that executes module)
-------------------------------------------
* Network access to HMC
* zhmcclient >=0.20.0
* ansible >=2.2.0.0
Options
-------
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>cpc_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the CPC that has the partition and is associated with the storage group.</div>
</td>
</tr>
<tr>
<td>faked_session<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>Real HMC will be used.</td>
<td></td>
<td>
<div>A <code>zhmcclient_mock.FakedSession</code> object that has a mocked HMC set up. If provided, it will be used instead of connecting to a real HMC. This is used for testing purposes only.</div>
</td>
</tr>
<tr>
<td rowspan="2">hmc_auth<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The authentication credentials for the HMC.</div>
    </td>
    </tr>
<tr>
<td colspan="5">
<table border=1 cellpadding=4>
<caption><b>Dictionary object hmc_auth</b></caption>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>userid<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The userid (username) for authenticating with the HMC.</div>
</td>
</tr>
<tr>
<td>password<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The password for authenticating with the HMC.</div>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td>hmc_host<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The hostname or IP address of the HMC.</div>
</td>
</tr>
<tr>
<td>log_file<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td></td>
<td>
<div>File path of a log file to which the logic flow of this module as well as interactions with the HMC are logged. If null, logging will be propagated to the Python root logger.</div>
</td>
</tr>
<tr>
<td>partition_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the partition for the attachment.</div>
</td>
</tr>
<tr>
<td>state<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td><ul><li>detached</li><li>attached</li><li>facts</li></ul></td>
<td>
<div>The desired state for the attachment:</div>
<div>* <code>detached</code>: Ensures that the storage group is not attached to the partition. If the storage group is currently attached to the partition and the partition is currently active, the module will fail.</div>
<div>* <code>attached</code>: Ensures that the storage group is attached to the partition.</div>
<div>* <code>facts</code>: Does not change anything on the attachment and returns the attachment status.</div>
</td>
</tr>
<tr>
<td>storage_group_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the storage group for the attachment.</div>
</td>
</tr>
</table>
</br>
Examples
--------
::
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about the attachment
zhmc_storage_group_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
partition_name: "{{ my_partition_name }}"
state: facts
register: sga1
- name: Ensure the storage group is attached to the partition
zhmc_storage_group_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
partition_name: "{{ my_partition_name }}"
state: attached
- name: "Ensure the storage group is not attached to the partition."
zhmc_storage_group_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
partition_name: "{{ my_partition_name }}"
state: detached
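
The attachment status gathered with ``state: facts`` can drive later tasks.
Illustrative example, using the ``sga1`` result registered above:

::

    - name: Fail if the storage group is unexpectedly not attached
      fail:
        msg: "Storage group is not attached to the partition"
      when: not sga1.storage_group_attachment.attached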
Return Values
-------------
Common return values are documented in :doc:`common_return_values`; the following are the fields unique to this module:
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">name</th>
<th class="head">description</th>
<th class="head">returned</th>
<th class="head">type</th>
<th class="head">sample</th>
</tr>
<tr>
<td>storage_group_attachment</td>
<td>
<div>A dictionary with a single key 'attached' whose boolean value indicates whether the storage group is now actually attached to the partition. If check mode was requested, the actual (i.e. not the desired) attachment state is returned.</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{"attached": true}</code>
</td>
</tr>
</table>
</br>
</br>
Notes
-----
.. note::
- The CPC that is associated with the target storage group must be in the Dynamic Partition Manager (DPM) operational mode and must have the "dpm-storage-management" firmware feature enabled. That feature has been introduced with the z14-ZR1 / Rockhopper II machine generation.
- This module performs actions only against the Z HMC regarding the attachment of storage group objects to partitions. This module does not perform any actions against storage subsystems or SAN switches.
- The Ansible module zhmc_hba is no longer used on CPCs that have the "dpm-storage-management" feature enabled.
Status
~~~~~~
This module is flagged as **preview**, which means that it is not guaranteed to have a backwards-compatible interface.
Support
~~~~~~~
This module is community maintained without core committer oversight.
For more information on what this means please read `Module Support`_.
For help in developing on modules, should you be so inclined, please read the contribution guidelines in the module's `source repository`_, `Testing Ansible`_ and `Developing Modules`_.
.. _`Module Support`: http://docs.ansible.com/ansible/latest/modules_support.html
.. _`Testing Ansible`: http://docs.ansible.com/ansible/latest/dev_guide/testing.html
.. _`Developing Modules`: http://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html
Shipment
~~~~~~~~
This module is a third-party module and is not shipped with Ansible. See the module's `source repository`_ for details.
.. _`source repository`: https://github.com/zhmcclient/zhmc-ansible-modules
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/zhmc_storage_group_attachment_module.rst | zhmc_storage_group_attachment_module.rst |
.. _zhmc_nic:
zhmc_nic - Manages NICs in existing partitions
++++++++++++++++++++++++++++++++++++++++++++++
.. contents::
:local:
:depth: 2
Synopsis
--------
* Creates, updates, and deletes NICs in existing partitions of a CPC.
* The targeted CPC must be in the Dynamic Partition Manager (DPM) operational mode.
Requirements (on host that executes module)
-------------------------------------------
* Network access to HMC
* zhmcclient >=0.14.0
* ansible >=2.2.0.0
Options
-------
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>cpc_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the CPC with the partition containing the NIC.</div>
</td>
</tr>
<tr>
<td>faked_session<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>Real HMC will be used.</td>
<td></td>
<td>
<div>A <code>zhmcclient_mock.FakedSession</code> object that has a mocked HMC set up. If provided, it will be used instead of connecting to a real HMC. This is used for testing purposes only.</div>
</td>
</tr>
<tr>
<td rowspan="2">hmc_auth<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The authentication credentials for the HMC.</div>
    </td>
    </tr>
<tr>
<td colspan="5">
<table border=1 cellpadding=4>
<caption><b>Dictionary object hmc_auth</b></caption>
<tr>
<th class="head">parameter</th>
<th class="head">required</th>
<th class="head">default</th>
<th class="head">choices</th>
<th class="head">comments</th>
</tr>
<tr>
<td>userid<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The userid (username) for authenticating with the HMC.</div>
</td>
</tr>
<tr>
<td>password<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The password for authenticating with the HMC.</div>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td>hmc_host<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The hostname or IP address of the HMC.</div>
</td>
</tr>
<tr>
<td>log_file<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td></td>
<td></td>
<td>
<div>File path of a log file to which the logic flow of this module as well as interactions with the HMC are logged. If null, logging will be propagated to the Python root logger.</div>
</td>
</tr>
<tr>
<td>name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the target NIC that is managed. If the NIC needs to be created, this value becomes its name.</div>
</td>
</tr>
<tr>
<td>partition_name<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td></td>
<td>
<div>The name of the partition containing the NIC.</div>
</td>
</tr>
<tr>
<td>properties<br/><div style="font-size: small;"></div></td>
<td>no</td>
<td>No input properties</td>
<td></td>
<td>
<div>Dictionary with input properties for the NIC, for <code>state=present</code>. Key is the property name with underscores instead of hyphens, and value is the property value in YAML syntax. Integer properties may also be provided as decimal strings. Will be ignored for <code>state=absent</code>.</div>
<div>The possible input properties in this dictionary are the properties defined as writeable in the data model for NIC resources (where the property names contain underscores instead of hyphens), with the following exceptions:</div>
<div>* <code>name</code>: Cannot be specified because the name has already been specified in the <code>name</code> module parameter.</div>
<div>* <code>network_adapter_port_uri</code> and <code>virtual_switch_uri</code>: Cannot be specified because this information is specified using the artificial properties <code>adapter_name</code> and <code>adapter_port</code>.</div>
<div>* <code>adapter_name</code>: The name of the adapter that has the port backing the target NIC. Used for all adapter families (ROCE, OSA, Hipersockets).</div>
<div>* <code>adapter_port</code>: The port index of the adapter port backing the target NIC. Used for all adapter families (ROCE, OSA, Hipersockets).</div>
<div>Properties omitted in this dictionary will remain unchanged when the NIC already exists, and will get the default value defined in the data model for NICs when the NIC is being created.</div>
</td>
</tr>
<tr>
<td>state<br/><div style="font-size: small;"></div></td>
<td>yes</td>
<td></td>
<td><ul><li>absent</li><li>present</li></ul></td>
<td>
<div>The desired state for the target NIC:</div>
<div><code>absent</code>: Ensures that the NIC does not exist in the specified partition.</div>
<div><code>present</code>: Ensures that the NIC exists in the specified partition and has the specified properties.</div>
</td>
</tr>
</table>
</br>
Examples
--------
::
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Ensure NIC exists in the partition
      zhmc_nic:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_nic_name }}"
state: present
properties:
adapter_name: "OSD 0128 A13B-13"
adapter_port: 0
description: "The port to our data network"
device_number: "023F"
register: nic1
- name: Ensure NIC does not exist in the partition
      zhmc_nic:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_nic_name }}"
state: absent
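
The registered result can be used in later tasks. Note that the returned
property names contain hyphens (-), so bracket notation is needed in Jinja2
expressions. Illustrative example, using the ``nic1`` result registered
above:

::

    - name: Show the backing virtual switch of the NIC
      debug:
        msg: "vswitch URI: {{ nic1.nic['virtual-switch-uri'] }}"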
Return Values
-------------
Common return values are documented in :doc:`common_return_values`; the following are the fields unique to this module:
.. raw:: html
<table border=1 cellpadding=4>
<tr>
<th class="head">name</th>
<th class="head">description</th>
<th class="head">returned</th>
<th class="head">type</th>
<th class="head">sample</th>
</tr>
<tr>
<td>nic</td>
<td>
<div>For <code>state=absent</code>, an empty dictionary.</div>
<div>For <code>state=present</code>, a dictionary with the resource properties of the NIC (after changes, if any). The dictionary keys are the exact property names as described in the data model for the resource, i.e. they contain hyphens (-), not underscores (_). The dictionary values are the property values using the Python representations described in the documentation of the zhmcclient Python package.</div>
</td>
<td align=center>success</td>
<td align=center>dict</td>
<td align=center><code>{
"name": "nic-1",
"description": "NIC #1",
"virtual-switch-uri': "/api/vswitches/...",
...
}</code>
</td>
</tr>
</table>
</br>
</br>
Notes
-----
.. note::
- See also Ansible module zhmc_partition.
Status
~~~~~~
This module is flagged as **preview**, which means that it is not guaranteed to have a backwards-compatible interface.
Support
~~~~~~~
This module is community maintained without core committer oversight.
For more information on what this means please read `Module Support`_.
For help in developing on modules, should you be so inclined, please read the contribution guidelines in the module's `source repository`_, `Testing Ansible`_ and `Developing Modules`_.
.. _`Module Support`: http://docs.ansible.com/ansible/latest/modules_support.html
.. _`Testing Ansible`: http://docs.ansible.com/ansible/latest/dev_guide/testing.html
.. _`Developing Modules`: http://docs.ansible.com/ansible/latest/dev_guide/developing_modules.html
Shipment
~~~~~~~~
This module is a third-party module and is not shipped with Ansible. See the module's `source repository`_ for details.
.. _`source repository`: https://github.com/zhmcclient/zhmc-ansible-modules
| zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/docs/gen/zhmc_nic_module.rst | zhmc_nic_module.rst |
from ansible.module_utils.basic import AnsibleModule
import requests.packages.urllib3
import zhmcclient
requests.packages.urllib3.disable_warnings()
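
# Illustrative playbook task for this module (a sketch; assumes the module is
# available on the Ansible library path and that the 'my_*' variables are set):
#
#   - name: Ensure LPAR is activated and loaded
#     zhmc_lpar:
#       auth_hmc: "{{ my_hmc_host }}"
#       auth_userid: "{{ my_hmc_userid }}"
#       auth_password: "{{ my_hmc_password }}"
#       cpc_name: "{{ my_cpc_name }}"
#       lpar_name: "{{ my_lpar_name }}"
#       load: "{{ my_load_address }}"
#       state: activated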
def zhmc_activate(data):
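    """
    Activate the LPAR and, if a load address is given and the LPAR is not
    yet operating, load it.

    Returns a tuple (is_error, changed, result).
    """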
auth_hmc = data['auth_hmc']
auth_userid = data['auth_userid']
auth_password = data['auth_password']
cpc_name = data['cpc_name']
lpar_name = data['lpar_name']
load_address = data['load']
    changed = False
    result = "Nothing"
try:
session = zhmcclient.Session(auth_hmc, auth_userid, auth_password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
lpar = cpc.lpars.find(name=lpar_name)
if lpar.properties['status'] in ["not-activated"]:
result = lpar.activate()
lpar = cpc.lpars.find(name=lpar_name)
changed = True
if lpar.properties['status'] not in ["operating"] and load_address:
result = lpar.load(load_address)
changed = True
session.logoff()
return False, changed, result
except zhmcclient.Error as exc:
session.logoff()
return True, False, str(exc)
def zhmc_deactivate(data):
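    """
    Deactivate the LPAR if its current status permits deactivation.

    Returns a tuple (is_error, changed, result).
    """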
auth_hmc = data['auth_hmc']
auth_userid = data['auth_userid']
auth_password = data['auth_password']
cpc_name = data['cpc_name']
lpar_name = data['lpar_name']
changed = False
result = "Nothing"
try:
session = zhmcclient.Session(auth_hmc, auth_userid, auth_password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
lpar = cpc.lpars.find(name=lpar_name)
if lpar.properties['status'] in ["operating", "not-operating", "exceptions" ]:
result = lpar.deactivate()
changed = True
session.logoff()
return False, changed, result
except zhmcclient.Error as exc:
session.logoff()
return True, False, str(exc)
def main():
fields = {
"auth_hmc": {"required": True, "type": "str"},
"auth_userid": {"required": True, "type": "str"},
"auth_password": {"required": True, "type": "str"},
"cpc_name": {"required": True, "type": "str"},
"lpar_name": {"required": True, "type": "str"},
"load": {"required": False, "type": "str"},
"state": {
"required": True,
"choices": ['activated', 'deactivated'],
"type": 'str'
}
}
choice_map = {
"activated": zhmc_activate,
"deactivated": zhmc_deactivate,
}
module = AnsibleModule(argument_spec=fields)
is_error, has_changed, result = choice_map.get(module.params['state'])(module.params)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
        # The result contains the error message string from the zhmcclient
        # exception; the message applies to both activate and deactivate.
        module.fail_json(msg="Error managing LPAR: " + result)
if __name__ == '__main__':
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/attic/ansible/modules/zhmc/zhmc_lpar.py | zhmc_lpar.py |
from __future__ import absolute_import, print_function
import logging
from ansible.module_utils.basic import AnsibleModule
import requests.packages.urllib3
import zhmcclient
from zhmc_ansible_modules.utils import log_init, Error, ParameterError, \
get_hmc_auth, get_session, to_unicode, process_normal_property, eq_hex
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_adapter
version_added: "0.6"
short_description: Manages an adapter in a CPC.
description:
- Gathers facts about the adapter including its ports.
- Updates the properties of an adapter.
notes:
author:
- Andreas Maier (@andy-maier, [email protected])
- Andreas Scheuring (@scheuran, [email protected])
requirements:
- Network access to HMC
- zhmcclient >=0.20.0
- ansible >=2.2.0.0
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
required: true
hmc_auth:
description:
- The authentication credentials for the HMC.
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
required: true
password:
description:
- The password for authenticating with the HMC.
required: true
  cpc_name:
    description:
      - The name of the CPC with the target adapter.
    required: true
  name:
    description:
      - The name of the target adapter. In case of renaming an adapter, this is
        the new name of the adapter.
    required: true
match:
description:
- "Only for C(state=set): Match properties for identifying the
target adapter in the set of adapters in the CPC, if an adapter with
the name specified in the C(name) module parameter does not exist in
that set. This parameter will be ignored otherwise."
- "Use of this parameter allows renaming an adapter:
The C(name) module parameter specifies the new name of the target
adapter, and the C(match) module parameter identifies the adapter to
be renamed.
This can be combined with other property updates by using the
C(properties) module parameter."
- "The parameter is a dictionary. The key of each dictionary item is the
property name as specified in the data model for adapter resources,
with underscores instead of hyphens. The value of each dictionary item
is the match value for the property (in YAML syntax). Integer
properties may also be provided as decimal strings."
- "The specified match properties follow the rules of filtering for the
zhmcclient library as described in
https://python-zhmcclient.readthedocs.io/en/stable/concepts.html#filtering"
- "The possible match properties are all properties in the data model for
adapter resources, including C(name)."
required: false
default: No match properties
state:
description:
- "The desired state for the attachment:"
- "* C(set): Ensures that an existing adapter has the specified
properties."
- "* C(present): Ensures that a Hipersockets adapter exists and has the
specified properties."
- "* C(absent): Ensures that a Hipersockets adapter does not exist."
- "* C(facts): Does not change anything on the adapter and returns
the adapter properties including its ports."
required: true
choices: ['set', 'present', 'absent', 'facts']
properties:
description:
- "Only for C(state=set|present): New values for the properties of the
adapter.
Properties omitted in this dictionary will remain unchanged.
This parameter will be ignored for other states."
- "The parameter is a dictionary. The key of each dictionary item is the
property name as specified in the data model for adapter resources,
with underscores instead of hyphens. The value of each dictionary item
is the property value (in YAML syntax). Integer properties may also be
provided as decimal strings."
- "The possible properties in this dictionary are the properties
defined as writeable in the data model for adapter resources, with the
following exceptions:"
- "* C(name): Cannot be specified as a property because the name has
already been specified in the C(name) module parameter."
- "* C(type): The desired adapter type can be specified in order to
support adapters that can change their type (e.g. the FICON Express
adapter can change its type between 'not-configured', 'fcp' and
'fc')."
- "* C(crypto_type): The crypto type can be specified in order to support
the ability of the Crypto Express adapters to change their crypto
type. Valid values are 'ep11', 'cca' and 'acc'. Changing to 'acc'
will zeroize the crypto adapter."
required: false
default: No property changes (other than possibly C(name)).
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
required: false
default: null
faked_session:
description:
- "A C(zhmcclient_mock.FakedSession) object that has a mocked HMC set up.
If provided, it will be used instead of connecting to a real HMC. This
is used for testing purposes only."
required: false
default: Real HMC will be used.
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about an existing adapter
zhmc_adapter:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_adapter_name }}"
state: facts
register: adapter1
- name: Ensure an existing adapter has the desired property values
zhmc_adapter:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_adapter_name }}"
state: set
properties:
description: "This is adapter {{ my_adapter_name }}"
register: adapter1
- name: "Ensure the existing adapter identified by its name or adapter ID has
the desired name and property values"
zhmc_adapter:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_adapter_name }}"
match:
adapter_id: "12C"
state: set
properties:
description: "This is adapter {{ my_adapter_name }}"
register: adapter1
- name: "Ensure a Hipersockets adapter exists and has the desired property
values"
zhmc_adapter:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_adapter_name }}"
state: present
properties:
type: hipersockets
description: "This is Hipersockets adapter {{ my_adapter_name }}"
register: adapter1
- name: "Ensure a Hipersockets adapter does not exist"
zhmc_adapter:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_adapter_name }}"
state: absent
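
# Illustrative example (a sketch; assumes that the adapter is a Crypto Express
# adapter, which supports changing its crypto type as described above):
- name: "Ensure a Crypto Express adapter is configured as a CCA coprocessor"
  zhmc_adapter:
    hmc_host: "{{ my_hmc_host }}"
    hmc_auth: "{{ my_hmc_auth }}"
    cpc_name: "{{ my_cpc_name }}"
    name: "{{ my_adapter_name }}"
    state: set
    properties:
      crypto_type: cca
  register: adapter1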
"""
RETURN = """
adapter:
description:
- "For C(state=absent), an empty dictionary."
- "For C(state=set|present|facts), a dictionary with the properties of the
adapter. The properties contain these additional artificial properties
for listing its child resources:
    - 'ports': The ports of the adapter, as a list of dicts with the
      port properties."
returned: success
type: dict
sample: |
C({
"name": "adapter-1",
"description": "Adapter 1",
"status": "active",
"acceptable_status": [ "active" ],
...
"ports": [
{
"name": "Port 0",
...
},
...
]
})
"""
# Python logger name for this module
LOGGER_NAME = 'zhmc_adapter'
LOGGER = logging.getLogger(LOGGER_NAME)
# Dictionary of properties of adapter resources, in this format:
#   name: (allowed, create, update, update_while_active, eq_func, type_cast)
# where:
# name: Name of the property according to the data model, with hyphens
# replaced by underscores (this is how it is or would be specified in
# the 'properties' module parameter).
# allowed: Indicates whether it is allowed in the 'properties' module
# parameter.
# create: Indicates whether it can be specified for the "Create Hipersockets
# Adapter" operation (that is the only type of creatable adapter).
# update: Indicates whether it can be specified for the "Modify Adapter
# Properties" operation (at all).
# update_while_active: Indicates whether it can be specified for the "Modify
# Adapter Properties" operation while the adapter is active. None means
# "not applicable" (used for update=False).
# eq_func: Equality test function for two values of the property; None means
# to use Python equality.
# type_cast: Type cast function for an input value of the property; None
# means to use it directly. This can be used for example to convert
# integers provided as strings by Ansible back into integers (that is a
# current deficiency of Ansible).
ZHMC_ADAPTER_PROPERTIES = {
# create-only properties: None
# update-only properties:
'crypto_type': (True, False, True, True, None, None),
# crypto_type: used for Change Crypto Type
'allowed_capacity': (True, False, True, True, None, int),
'channel_path_id': (True, False, True, True, eq_hex, None),
'crypto_number': (True, False, True, True, None, int),
'tke_commands_enabled': (True, False, True, True, None, None),
# create+update properties: (create is for hipersockets)
'name': (False, True, True, True, None, None), # in 'name' parm
'description': (True, True, True, True, None, to_unicode),
'maximum_transmission_unit_size': (True, True, True, True, None, int),
'type': (True, True, True, True, None, None),
# type used for Create Hipersockets and for Change Adapter Type
# read-only properties:
'object_uri': (False, None, False, None, None, None),
'object_id': (False, None, False, None, None, None),
'parent': (False, None, False, None, None, None),
'class': (False, None, False, None, None, None),
'status': (False, None, False, None, None, None),
'adapter_id': (False, None, False, None, None, None),
'adapter_family': (False, None, False, None, None, None),
'detected_card_type': (False, None, False, None, None, None),
'card_location': (False, None, False, None, None, None),
'port_count': (False, None, False, None, None, None),
'network_port_uris': (False, None, False, None, None, None),
'storage_port_uris': (False, None, False, None, None, None),
'state': (False, None, False, None, None, None),
'configured_capacity': (False, None, False, None, None, None),
'used_capacity': (False, None, False, None, None, None),
'maximum_total_capacity': (False, None, False, None, None, None),
'physical_channel_status': (False, None, False, None, None, None),
'udx_loaded': (False, None, False, None, None, None),
}
# Conversion of crypto types between module parameter values and HMC values
CRYPTO_TYPES_MOD2HMC = {
'acc': 'accelerator',
'cca': 'cca-coprocessor',
'ep11': 'ep11-coprocessor',
}
def process_properties(adapter, params):
"""
Process the properties specified in the 'properties' module parameter,
and return a dictionary (update_props) that contains the properties that
can be updated. The input property values are compared with the existing
resource property values and the returned set of properties is the minimal
set of properties that need to be changed.
- Underscores in the property names are translated into hyphens.
- The presence of properties that cannot be updated is surfaced by raising
ParameterError.
Parameters:
adapter (zhmcclient.Adapter): Existing adapter to be updated, or `None`
if the adapter does not exist.
params (dict): Module input parameters.
Returns:
      tuple of (create_props, update_props, change_adapter_type,
      change_crypto_type), where:
* create_props: dict of properties from params that may be specified
in zhmcclient.AdapterManager.create_hipersocket() (may overlap with
update_props).
* update_props: dict of properties from params that may be specified
in zhmcclient.Adapter.update_properties() (may overlap with
create_props).
* change_adapter_type: String with new adapter type (i.e. input for
Change Adapter Type operation), or None if no change needed.
* change_crypto_type: String with new crypto type (i.e. input for
Change Crypto Type operation), or None if no change needed.
Raises:
ParameterError: An issue with the module parameters.
"""
# Prepare return values
create_props = {}
update_props = {}
change_adapter_type = None # New adapter type, if needed
change_crypto_type = None # New crypto type, if needed
# handle the 'name' module parameter
adapter_name = to_unicode(params['name'])
if adapter and adapter.properties.get('name', None) == adapter_name:
pass # adapter exists and has the desired name
else:
create_props['name'] = adapter_name
update_props['name'] = adapter_name
# handle the other input properties
input_props = params.get('properties', None)
if input_props is None:
input_props = {}
for prop_name in input_props:
try:
allowed, create, update, update_active, eq_func, type_cast = \
ZHMC_ADAPTER_PROPERTIES[prop_name]
except KeyError:
allowed = False
if not allowed:
raise ParameterError(
"Invalid adapter property {!r} specified in the 'properties' "
"module parameter.".format(prop_name))
if adapter and prop_name == 'type':
# Determine need to change the adapter type
_current_adapter_type = adapter.properties.get('type', None)
_input_adapter_type = input_props[prop_name]
if _input_adapter_type != _current_adapter_type:
change_adapter_type = _input_adapter_type
elif adapter and prop_name == 'crypto_type':
# Determine need to change the crypto type
_current_crypto_type = adapter.properties.get('crypto-type', None)
_input_crypto_type = CRYPTO_TYPES_MOD2HMC[input_props[prop_name]]
if _input_crypto_type != _current_crypto_type:
change_crypto_type = _input_crypto_type
else:
# Process a normal (= non-artificial) property
_create_props, _update_props, _stop = process_normal_property(
prop_name, ZHMC_ADAPTER_PROPERTIES, input_props, adapter)
create_props.update(_create_props)
update_props.update(_update_props)
assert _stop is False
return create_props, update_props, change_adapter_type, change_crypto_type
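
# Illustrative example of process_properties() (not executed; a sketch that
# assumes the adapter exists and already has the desired name):
#
#   params = {'name': 'adapter-1', 'properties': {'description': 'new'}}
#   create_props, update_props, chg_type, chg_crypto = \
#       process_properties(adapter, params)
#
# create_props then does not contain 'name'; update_props contains the new
# 'description' only if it differs from the adapter's current value (minimal
# change set); chg_type and chg_crypto are None since no type change was
# requested.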
def identify_adapter(cpc, name, match_props):
"""
Identify the target adapter based on its name, or if an adapter with that
name does not exist in the CPC, based on its match properties.
"""
try:
adapter = cpc.adapters.find(name=name)
except zhmcclient.NotFound:
if not match_props:
raise
match_props_hmc = dict()
for prop_name in match_props:
prop_name_hmc = prop_name.replace('_', '-')
match_value = match_props[prop_name]
# Apply type cast from property definition also to match values:
if prop_name in ZHMC_ADAPTER_PROPERTIES:
type_cast = ZHMC_ADAPTER_PROPERTIES[prop_name][5]
if type_cast:
match_value = type_cast(match_value)
match_props_hmc[prop_name_hmc] = match_value
adapter = cpc.adapters.find(**match_props_hmc)
return adapter
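
# Illustrative example (not executed): identify_adapter(cpc, 'new-name',
# {'adapter_id': '12C'}) returns the adapter whose 'adapter-id' property is
# '12C' if no adapter named 'new-name' exists, which enables the renaming
# use case shown in the EXAMPLES section above.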
def ensure_set(params, check_mode):
"""
Identify the target adapter (that must exist) and ensure that the specified
properties are set on the adapter.
Raises:
ParameterError: An issue with the module parameters.
Error: Other errors during processing.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
# Note: Defaults specified in argument_spec will be set in params dict
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
adapter_name = params['name']
adapter_match = params['match']
faked_session = params.get('faked_session', None) # No default specified
changed = False
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
adapter = identify_adapter(cpc, adapter_name, adapter_match)
# The default exception handling is sufficient for the above.
adapter.pull_full_properties()
result = adapter.properties
# It was identified by name or match properties, so it does exist.
# Update its properties and change adapter and crypto type, if
# needed.
create_props, update_props, chg_adapter_type, chg_crypto_type = \
process_properties(adapter, params)
if update_props:
if not check_mode:
adapter.update_properties(update_props)
else:
result.update(update_props) # from input values
changed = True
if chg_adapter_type:
if not check_mode:
adapter.change_adapter_type(chg_adapter_type)
else:
result['type'] = chg_adapter_type
changed = True
if chg_crypto_type:
if not check_mode:
adapter.change_crypto_type(chg_crypto_type)
else:
result['crypto-type'] = chg_crypto_type
changed = True
if changed and not check_mode:
adapter.pull_full_properties()
result = adapter.properties # from actual values
ports = adapter.ports.list()
result_ports = list()
for port in ports:
# TODO: Disabling the following line mitigates the recent issue
# with HTTP error 404,4 when retrieving port properties.
# port.pull_full_properties()
result_ports.append(port.properties)
result['ports'] = result_ports
return changed, result
finally:
session.logoff()
def ensure_present(params, check_mode):
"""
Ensure that the specified Hipersockets adapter exists and has the
specified properties set.
Raises:
ParameterError: An issue with the module parameters.
Error: Other errors during processing.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
# Note: Defaults specified in argument_spec will be set in params dict
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
adapter_name = params['name']
faked_session = params.get('faked_session', None) # No default specified
changed = False
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
adapter = cpc.adapters.find(name=adapter_name)
except zhmcclient.NotFound:
adapter = None
if not adapter:
# It does not exist. The only possible adapter type
# that can be created is a Hipersockets adapter, but before
# creating one we check the 'type' input property to verify that
# the intention is really Hipersockets creation, and not just a
            # misspelled name.
input_props = params.get('properties', None)
if input_props is None:
adapter_type = None
else:
adapter_type = input_props.get('type', None)
if adapter_type is None:
raise ParameterError(
"Input property 'type' missing when creating "
"Hipersockets adapter {!r} (must specify 'hipersockets')".
format(adapter_name))
if adapter_type != 'hipersockets':
raise ParameterError(
"Input property 'type' specifies {!r} when creating "
"Hipersockets adapter {!r} (must specify 'hipersockets').".
format(adapter_type, adapter_name))
create_props, update_props, _, _ = \
process_properties(adapter, params)
# This is specific to Hipersockets: There are no update-only
# properties, so any remaining such property is an input error
invalid_update_props = {}
for name in update_props:
if name not in create_props:
invalid_update_props[name] = update_props[name]
if invalid_update_props:
raise ParameterError(
"Invalid input properties specified when creating "
"Hipersockets adapter {!r}: {!r}".
format(adapter_name, invalid_update_props))
# While the 'type' input property is required for verifying
# the intention, it is not allowed as input for the
# Create Hipersocket HMC operation.
del create_props['type']
if not check_mode:
adapter = cpc.adapters.create_hipersocket(create_props)
adapter.pull_full_properties()
result = adapter.properties # from actual values
else:
adapter = None
result = dict()
result.update(create_props) # from input values
changed = True
else:
# It does exist.
# Update its properties and change adapter and crypto type, if
# needed.
adapter.pull_full_properties()
result = adapter.properties
create_props, update_props, chg_adapter_type, chg_crypto_type = \
process_properties(adapter, params)
if update_props:
if not check_mode:
adapter.update_properties(update_props)
else:
result.update(update_props) # from input values
changed = True
if chg_adapter_type:
if not check_mode:
adapter.change_adapter_type(chg_adapter_type)
else:
result['type'] = chg_adapter_type
changed = True
if chg_crypto_type:
if not check_mode:
adapter.change_crypto_type(chg_crypto_type)
else:
result['crypto-type'] = chg_crypto_type
changed = True
if changed and not check_mode:
adapter.pull_full_properties()
result = adapter.properties # from actual values
if adapter:
ports = adapter.ports.list()
result_ports = list()
for port in ports:
port.pull_full_properties()
result_ports.append(port.properties)
result['ports'] = result_ports
else:
# For now, we return no ports when creating in check mode
                result['ports'] = list()
return changed, result
finally:
session.logoff()
def ensure_absent(params, check_mode):
"""
Ensure that the specified Hipersockets adapter does not exist.
Raises:
ParameterError: An issue with the module parameters.
Error: Other errors during processing.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
# Note: Defaults specified in argument_spec will be set in params dict
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
adapter_name = params['name']
faked_session = params.get('faked_session', None) # No default specified
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
adapter = cpc.adapters.find(name=adapter_name)
except zhmcclient.NotFound:
return changed, result
if not check_mode:
adapter.delete()
changed = True
return changed, result
finally:
session.logoff()
def facts(params, check_mode):
"""
    Identify the target adapter and return facts about the adapter and its
    ports.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
adapter_name = params['name']
faked_session = params.get('faked_session', None) # No default specified
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
adapter = cpc.adapters.find(name=adapter_name)
# The default exception handling is sufficient for the above.
adapter.pull_full_properties()
result = adapter.properties
ports = adapter.ports.list()
result_ports = list()
for port in ports:
port.pull_full_properties()
result_ports.append(port.properties)
result['ports'] = result_ports
return False, result
finally:
session.logoff()
def perform_task(params, check_mode):
"""
Perform the task for this module, dependent on the 'state' module
parameter.
If check_mode is True, check whether changes would occur, but don't
actually perform any changes.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
actions = {
"set": ensure_set,
"present": ensure_present,
"absent": ensure_absent,
"facts": facts,
}
return actions[params['state']](params, check_mode)
def main():
# The following definition of module input parameters must match the
# description of the options in the DOCUMENTATION string.
argument_spec = dict(
hmc_host=dict(required=True, type='str'),
hmc_auth=dict(required=True, type='dict', no_log=True),
cpc_name=dict(required=True, type='str'),
name=dict(required=True, type='str'),
match=dict(required=False, type='dict', default={}),
state=dict(required=True, type='str',
choices=['set', 'present', 'absent', 'facts']),
properties=dict(required=False, type='dict', default={}),
log_file=dict(required=False, type='str', default=None),
faked_session=dict(required=False, type='object'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
log_file = module.params['log_file']
log_init(LOGGER_NAME, log_file)
_params = dict(module.params)
del _params['hmc_auth']
LOGGER.debug("Module entry: params: {!r}".format(_params))
try:
changed, result = perform_task(module.params, module.check_mode)
except (Error, zhmcclient.Error) as exc:
# These exceptions are considered errors in the environment or in user
# input. They have a proper message that stands on its own, so we
# simply pass that message on and will not need a traceback.
msg = "{}: {}".format(exc.__class__.__name__, exc)
LOGGER.debug(
"Module exit (failure): msg: {!r}".
format(msg))
module.fail_json(msg=msg)
# Other exceptions are considered module errors and are handled by Ansible
# by showing the traceback.
LOGGER.debug(
"Module exit (success): changed: {!r}, adapter: {!r}".
format(changed, result))
module.exit_json(
changed=changed, adapter=result)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/zhmc_adapter.py | zhmc_adapter.py |
from __future__ import absolute_import, print_function
import logging
from ansible.module_utils.basic import AnsibleModule
import requests.packages.urllib3
import zhmcclient
from zhmc_ansible_modules.utils import log_init, Error, ParameterError, \
wait_for_transition_completion, eq_hex, get_hmc_auth, get_session, \
to_unicode, process_normal_property
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_hba
version_added: "0.0"
short_description: Manages HBAs in existing partitions (without
"dpm-storage-management" feature)
description:
- Creates, updates, and deletes HBAs in existing partitions of a CPC.
- The targeted CPC must be in the Dynamic Partition Manager (DPM) operational
mode.
notes:
- See also Ansible module zhmc_partition.
author:
- Andreas Maier (@andy-maier, [email protected])
- Andreas Scheuring (@scheuran, [email protected])
- Juergen Leopold (@leopoldjuergen, [email protected])
requirements:
- Network access to HMC
- zhmcclient >=0.14.0
- ansible >=2.2.0.0
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
required: true
hmc_auth:
description:
- The authentication credentials for the HMC.
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
required: true
password:
description:
- The password for authenticating with the HMC.
required: true
cpc_name:
description:
- The name of the CPC with the partition containing the HBA.
required: true
partition_name:
description:
- The name of the partition containing the HBA.
required: true
name:
description:
- The name of the target HBA that is managed. If the HBA needs to be
created, this value becomes its name.
required: true
state:
description:
- "The desired state for the target HBA:"
- "C(absent): Ensures that the HBA does not exist in the specified
partition."
- "C(present): Ensures that the HBA exists in the specified partition
and has the specified properties."
required: true
choices: ["absent", "present"]
properties:
description:
- "Dictionary with input properties for the HBA, for C(state=present).
Key is the property name with underscores instead of hyphens, and
value is the property value in YAML syntax. Integer properties may
also be provided as decimal strings. Will be ignored for
C(state=absent)."
- "The possible input properties in this dictionary are the properties
defined as writeable in the data model for HBA resources (where the
property names contain underscores instead of hyphens), with the
following exceptions:"
- "* C(name): Cannot be specified because the name has already been
specified in the C(name) module parameter."
- "* C(adapter_port_uri): Cannot be specified because this information is
specified using the artificial properties C(adapter_name) and
C(adapter_port)."
- "* C(adapter_name): The name of the adapter that has the port backing
the target HBA. Cannot be changed after the HBA exists."
- "* C(adapter_port): The port index of the adapter port backing the
target HBA. Cannot be changed after the HBA exists."
- "Properties omitted in this dictionary will remain unchanged when the
HBA already exists, and will get the default value defined in the
data model for HBAs when the HBA is being created."
required: false
default: No input properties
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
required: false
default: null
faked_session:
description:
- "A C(zhmcclient_mock.FakedSession) object that has a mocked HMC set up.
If provided, it will be used instead of connecting to a real HMC. This
is used for testing purposes only."
required: false
default: Real HMC will be used.
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Ensure HBA exists in the partition
  zhmc_hba:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_hba_name }}"
state: present
properties:
adapter_name: FCP-1
adapter_port: 0
description: "The port to our V7K #1"
device_number: "123F"
register: hba1
- name: Ensure HBA does not exist in the partition
  zhmc_hba:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_hba_name }}"
state: absent
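
# Illustrative example (a sketch; assumes the first task above registered
# 'hba1'; the returned property names contain hyphens, so bracket notation
# is needed):
- name: Show the WWPN of the HBA
  debug:
    msg: "WWPN: {{ hba1.hba['wwpn'] }}"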
"""
RETURN = """
hba:
description:
- "For C(state=absent), an empty dictionary."
- "For C(state=present), a dictionary with the resource properties of the
HBA (after changes, if any). The dictionary keys are the exact property
names as described in the data model for the resource, i.e. they contain
hyphens (-), not underscores (_). The dictionary values are the property
values using the Python representations described in the documentation
of the zhmcclient Python package."
returned: success
type: dict
sample: |
C({
"name": "hba-1",
"description": "HBA #1",
"adapter-port-uri": "/api/adapters/.../ports/...",
...
})
"""
# Python logger name for this module
LOGGER_NAME = 'zhmc_hba'
LOGGER = logging.getLogger(LOGGER_NAME)
# Dictionary of properties of HBA resources, in this format:
# name: (allowed, create, update, update_while_active, eq_func, type_cast)
# where:
# name: Name of the property according to the data model, with hyphens
# replaced by underscores (this is how it is or would be specified in
# the 'properties' module parameter).
# allowed: Indicates whether it is allowed in the 'properties' module
# parameter.
# create: Indicates whether it can be specified for the "Create HBA"
# operation.
# update: Indicates whether it can be specified for the "Update HBA
# Properties" operation (at all).
# update_while_active: Indicates whether it can be specified for the "Update
# HBA Properties" operation while the partition of the HBA is active. None
# means "not applicable" (i.e. update=False).
# eq_func: Equality test function for two values of the property; None means
# to use Python equality.
# type_cast: Type cast function for an input value of the property; None
# means to use it directly. This can be used for example to convert
# integers provided as strings by Ansible back into integers (that is a
# current deficiency of Ansible).
ZHMC_HBA_PROPERTIES = {
# create-only properties:
'adapter_port_uri': (
False, True, False, None, None, None), # via adapter_name/_port
'adapter_name': (
True, True, False, None, None,
None), # artificial property, type_cast ignored
'adapter_port': (
True, True, False, None, None,
None), # artificial property, type_cast ignored
# create+update properties:
'name': (
False, True, True, True, None, None), # provided in 'name' module parm
'description': (True, True, True, True, None, to_unicode),
'device_number': (True, True, True, True, eq_hex, None),
# read-only properties:
    'element_uri': (False, False, False, None, None, None),
    'element_id': (False, False, False, None, None, None),
'parent': (False, False, False, None, None, None),
'class': (False, False, False, None, None, None),
'wwpn': (False, False, False, None, None, None),
}
def process_properties(partition, hba, params):
"""
Process the properties specified in the 'properties' module parameter,
and return two dictionaries (create_props, update_props) that contain
the properties that can be created, and the properties that can be updated,
respectively. If the resource exists, the input property values are
compared with the existing resource property values and the returned set
of properties is the minimal set of properties that need to be changed.
- Underscores in the property names are translated into hyphens.
- The presence of read-only properties, invalid properties (i.e. not
defined in the data model for partitions), and properties that are not
allowed because of restrictions or because they are auto-created from
an artificial property is surfaced by raising ParameterError.
- The properties resulting from handling artificial properties are
added to the returned dictionaries.
Parameters:
partition (zhmcclient.Partition): Partition containing the HBA. Must
exist.
hba (zhmcclient.Hba): HBA to be updated with the full set of current
properties, or `None` if it did not previously exist.
params (dict): Module input parameters.
Returns:
tuple of (create_props, update_props, stop), where:
* create_props: dict of properties for
zhmcclient.HbaManager.create()
* update_props: dict of properties for
zhmcclient.Hba.update_properties()
* stop (bool): Indicates whether some update properties require the
        partition containing the HBA to be stopped when doing the update.
Raises:
ParameterError: An issue with the module parameters.
"""
create_props = {}
update_props = {}
stop = False
# handle 'name' property
hba_name = to_unicode(params['name'])
create_props['name'] = hba_name
# We looked up the HBA by name, so we will never have to update its name
# Names of the artificial properties
adapter_name_art_name = 'adapter_name'
adapter_port_art_name = 'adapter_port'
# handle the other properties
input_props = params.get('properties', {})
if input_props is None:
input_props = {}
for prop_name in input_props:
if prop_name not in ZHMC_HBA_PROPERTIES:
raise ParameterError(
"Property {!r} is not defined in the data model for "
"HBAs.".format(prop_name))
allowed, create, update, update_while_active, eq_func, type_cast = \
ZHMC_HBA_PROPERTIES[prop_name]
if not allowed:
raise ParameterError(
"Property {!r} is not allowed in the 'properties' module "
"parameter.".format(prop_name))
if prop_name in (adapter_name_art_name, adapter_port_art_name):
# Artificial properties will be processed together after this loop
continue
# Process a normal (= non-artificial) property
_create_props, _update_props, _stop = process_normal_property(
prop_name, ZHMC_HBA_PROPERTIES, input_props, hba)
create_props.update(_create_props)
update_props.update(_update_props)
if _stop:
stop = True
# Process artificial properties
if (adapter_name_art_name in input_props) != \
(adapter_port_art_name in input_props):
raise ParameterError(
"Artificial properties {!r} and {!r} must either both be "
"specified or both be omitted.".
format(adapter_name_art_name, adapter_port_art_name))
if adapter_name_art_name in input_props and \
adapter_port_art_name in input_props:
adapter_name = to_unicode(input_props[adapter_name_art_name])
adapter_port_index = int(input_props[adapter_port_art_name])
try:
adapter = partition.manager.cpc.adapters.find(
name=adapter_name)
except zhmcclient.NotFound:
raise ParameterError(
"Artificial property {!r} does not specify the name of an "
"existing adapter: {!r}".
format(adapter_name_art_name, adapter_name))
try:
port = adapter.ports.find(index=adapter_port_index)
except zhmcclient.NotFound:
raise ParameterError(
"Artificial property {!r} does not specify the index of an "
"existing port on adapter {!r}: {!r}".
format(adapter_port_art_name, adapter_name,
adapter_port_index))
hmc_prop_name = 'adapter-port-uri'
if hba:
existing_port_uri = hba.get_property(hmc_prop_name)
if port.uri != existing_port_uri:
raise ParameterError(
"Artificial properties {!r} and {!r} cannot be used to "
"change the adapter port of an existing HBA".
format(adapter_name_art_name, adapter_port_art_name))
create_props[hmc_prop_name] = port.uri
return create_props, update_props, stop
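
# Illustrative example of the artificial properties (not executed; a sketch):
# specifying
#
#   properties:
#     adapter_name: FCP-1
#     adapter_port: 0
#
# causes process_properties() to look up port 0 of adapter 'FCP-1' on the
# partition's CPC and to set the HMC property 'adapter-port-uri' in
# create_props to that port's URI.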
def ensure_present(params, check_mode):
"""
Ensure that the HBA exists and has the specified properties.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['partition_name']
hba_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
partition = cpc.partitions.find(name=partition_name)
except zhmcclient.NotFound:
if check_mode:
# Once the partition is created, the HBA will also need to be
# created. Therefore, we set changed.
changed = True
return changed, result
raise
try:
hba = partition.hbas.find(name=hba_name)
hba.pull_full_properties()
except zhmcclient.NotFound:
hba = None
if not hba:
# It does not exist. Create it and update it if there are
# update-only properties.
if not check_mode:
create_props, update_props, stop = process_properties(
partition, hba, params)
hba = partition.hbas.create(create_props)
update2_props = {}
for name in update_props:
if name not in create_props:
update2_props[name] = update_props[name]
if update2_props:
hba.update_properties(update2_props)
# We refresh the properties after the update, in case an
# input property value gets changed (for example, the
# partition does that with memory properties).
hba.pull_full_properties()
else:
# TODO: Show props in module result also in check mode.
pass
changed = True
else:
# It exists. Stop the partition if needed due to the HBA property
# update requirements, or wait for an updateable partition status,
# and update the HBA properties.
create_props, update_props, stop = process_properties(
partition, hba, params)
if update_props:
if not check_mode:
# HBA properties can all be updated while the partition is
# active, therefore:
assert not stop
wait_for_transition_completion(partition)
hba.update_properties(update_props)
# We refresh the properties after the update, in case an
# input property value gets changed (for example, the
# partition does that with memory properties).
hba.pull_full_properties()
else:
# TODO: Show updated props in mod.result also in chk.mode
pass
changed = True
if hba:
result = hba.properties
return changed, result
finally:
session.logoff()
def ensure_absent(params, check_mode):
"""
Ensure that the HBA does not exist.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['partition_name']
hba_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
partition = cpc.partitions.find(name=partition_name)
# The default exception handling is sufficient for the above.
try:
hba = partition.hbas.find(name=hba_name)
except zhmcclient.NotFound:
return changed, result
if not check_mode:
hba.delete()
changed = True
return changed, result
finally:
session.logoff()
def perform_task(params, check_mode):
"""
Perform the task for this module, dependent on the 'state' module
parameter.
If check_mode is True, check whether changes would occur, but don't
actually perform any changes.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
actions = {
"absent": ensure_absent,
"present": ensure_present,
}
return actions[params['state']](params, check_mode)
def main():
# The following definition of module input parameters must match the
# description of the options in the DOCUMENTATION string.
argument_spec = dict(
hmc_host=dict(required=True, type='str'),
hmc_auth=dict(required=True, type='dict', no_log=True),
cpc_name=dict(required=True, type='str'),
partition_name=dict(required=True, type='str'),
name=dict(required=True, type='str'),
state=dict(required=True, type='str',
choices=['absent', 'present']),
properties=dict(required=False, type='dict', default={}),
log_file=dict(required=False, type='str', default=None),
faked_session=dict(required=False, type='object'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
log_file = module.params['log_file']
log_init(LOGGER_NAME, log_file)
_params = dict(module.params)
del _params['hmc_auth']
LOGGER.debug("Module entry: params: {!r}".format(_params))
try:
changed, result = perform_task(module.params, module.check_mode)
except (Error, zhmcclient.Error) as exc:
# These exceptions are considered errors in the environment or in user
# input. They have a proper message that stands on its own, so we
# simply pass that message on and will not need a traceback.
msg = "{}: {}".format(exc.__class__.__name__, exc)
LOGGER.debug(
"Module exit (failure): msg: {!r}".
format(msg))
module.fail_json(msg=msg)
# Other exceptions are considered module errors and are handled by Ansible
# by showing the traceback.
LOGGER.debug(
"Module exit (success): changed: {!r}, cpc: {!r}".
format(changed, result))
module.exit_json(changed=changed, hba=result)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/zhmc_hba.py | zhmc_hba.py |
from __future__ import absolute_import, print_function
import logging
from ansible.module_utils.basic import AnsibleModule
import requests.packages.urllib3
import zhmcclient
from zhmc_ansible_modules.utils import log_init, Error, ParameterError, \
get_hmc_auth, get_session, to_unicode, process_normal_property
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_cpc
version_added: "0.6"
short_description: Manages a CPC.
description:
- Gathers facts about the CPC including its child resources.
- Updates the properties of a CPC.
author:
- Andreas Maier (@andy-maier, [email protected])
- Andreas Scheuring (@scheuran, [email protected])
requirements:
- Network access to HMC
- zhmcclient >=0.20.0
- ansible >=2.2.0.0
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
required: true
hmc_auth:
description:
- The authentication credentials for the HMC.
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
required: true
password:
description:
- The password for authenticating with the HMC.
required: true
name:
description:
- The name of the target CPC.
required: true
state:
description:
- "The desired state for the attachment:"
- "* C(set): Ensures that the CPC has the specified properties."
- "* C(facts): Does not change anything on the CPC and returns
the CPC properties including its child resources."
required: true
choices: ['set', 'facts']
properties:
description:
- "Only for C(state=set): New values for the properties of the CPC.
Properties omitted in this dictionary will remain unchanged.
This parameter will be ignored for C(state=facts)."
- "The parameter is a dictionary. The key of each dictionary item is the
property name as specified in the data model for CPC resources, with
underscores instead of hyphens. The value of each dictionary item is
the property value (in YAML syntax). Integer properties may also be
provided as decimal strings."
- "The possible properties in this dictionary are the properties
defined as writeable in the data model for CPC resources."
required: false
default: No property changes.
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
required: false
default: null
faked_session:
description:
- "A C(zhmcclient_mock.FakedSession) object that has a mocked HMC set up.
If provided, it will be used instead of connecting to a real HMC. This
is used for testing purposes only."
required: false
default: Real HMC will be used.
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about the CPC
zhmc_cpc:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
name: "{{ my_cpc_name }}"
state: facts
register: cpc1
- name: Ensure the CPC has the desired property values
zhmc_cpc:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
name: "{{ my_cpc_name }}"
state: set
properties:
acceptable_status:
- active
description: "This is CPC {{ my_cpc_name }}"
"""
RETURN = """
cpc:
description:
- "For C(state=set|facts), a
dictionary with the properties of the CPC. The properties contain
these additional artificial properties for listing its child resources:
- 'partitions': The defined partitions of the CPC, as a dict of key:
partition name, value: dict of a subset of the partition properties
(name, status, object-uri).
- 'adapters': The adapters of the CPC, as a dict of key:
adapter name, value: dict of a subset of the adapter properties
(name, status, object-uri).
- 'storage-groups': The storage groups associated with the CPC, as a
dict of key: storage group name, value: dict of a subset of the
storage group properties (name, fulfillment-status, object-uri)."
returned: success
type: dict
sample: |
C({
"name": "CPCA",
"description": "CPC A",
"status": "active",
"acceptable-status": [ "active" ],
...
"partitions": [
{
"name": "part-1",
...
},
...
],
"adapters": [
{
"name": "adapter-1",
...
},
...
],
"storage-groups": [
{
"name": "sg-1",
...
},
...
],
})
"""
# Python logger name for this module
LOGGER_NAME = 'zhmc_cpc'
LOGGER = logging.getLogger(LOGGER_NAME)
# Dictionary of properties of CPC resources, in this format:
# name: (allowed, create, update, update_while_active, eq_func, type_cast)
# where:
# name: Name of the property according to the data model, with hyphens
# replaced by underscores (this is how it is or would be specified in
# the 'properties' module parameter).
# allowed: Indicates whether it is allowed in the 'properties' module
# parameter.
# create: Not applicable for CPCs.
# update: Indicates whether it can be specified for the "Modify CPC
# Properties" operation (at all).
# update_while_active: Indicates whether it can be specified for the "Modify
# CPC Properties" operation while the CPC is active. None means
# "not applicable" (used for update=False).
# eq_func: Equality test function for two values of the property; None means
# to use Python equality.
# type_cast: Type cast function for an input value of the property; None
# means to use it directly. This can be used for example to convert
# integers provided as strings by Ansible back into integers (that is a
# current deficiency of Ansible).
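# As an illustrative reading (not part of the module logic), the entry
#   'description': (True, None, True, True, None, to_unicode)
# in the dictionary below means: the property is allowed in the 'properties'
# module parameter, create is not applicable for CPCs, the property can be
# updated (also while the CPC is active), values are compared using Python
# equality, and input values are cast to unicode.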
ZHMC_CPC_PROPERTIES = {
# update properties:
'description': (True, None, True, True, None, to_unicode),
'acceptable_status': (True, None, True, True, None, None),
# read-only properties (subset):
'name': (False, None, False, None, None, None), # provided in 'name' parm
'object_uri': (False, None, False, None, None, None),
'object_id': (False, None, False, None, None, None),
'parent': (False, None, False, None, None, None),
'class': (False, None, False, None, None, None),
}
def process_properties(cpc, params):
"""
Process the properties specified in the 'properties' module parameter,
and return a dictionary (update_props) that contains the properties that
can be updated. The input property values are compared with the existing
resource property values and the returned set of properties is the minimal
set of properties that need to be changed.
- Underscores in the property names are translated into hyphens.
- The presence of properties that cannot be updated is surfaced by raising
ParameterError.
Parameters:
cpc (zhmcclient.Cpc): CPC to be updated.
params (dict): Module input parameters.
Returns:
update_props: dict of properties for zhmcclient.Cpc.update_properties()
Raises:
ParameterError: An issue with the module parameters.
"""
input_props = params.get('properties', None)
if input_props is None:
input_props = {}
update_props = {}
for prop_name in input_props:
try:
allowed, create, update, update_active, eq_func, type_cast = \
ZHMC_CPC_PROPERTIES[prop_name]
except KeyError:
allowed = False
if not allowed:
raise ParameterError(
"CPC property {!r} specified in the 'properties' module "
"parameter cannot be updated.".format(prop_name))
# Process a normal (= non-artificial) property
_create_props, _update_props, _stop = process_normal_property(
prop_name, ZHMC_CPC_PROPERTIES, input_props, cpc)
update_props.update(_update_props)
assert not _create_props
assert _stop is False
return update_props
def add_artificial_properties(cpc):
"""
Add artificial properties to the CPC object.
Upon return, the properties of the cpc object have been
extended by these artificial properties:
* 'partitions': List of partitions of the CPC, with the subset of their
properties returned by the list operation.
* 'adapters': List of adapters of the CPC, with the subset of their
properties returned by the list operation.
* 'storage-groups': List of storage groups associated with the CPC, with
the subset of their properties returned by the list operation.
"""
partitions = cpc.partitions.list()
cpc.properties['partitions'] = [p.properties for p in partitions]
adapters = cpc.adapters.list()
cpc.properties['adapters'] = [a.properties for a in adapters]
storage_groups = cpc.manager.console.storage_groups.list(
filter_args={'cpc-uri': cpc.uri})
cpc.properties['storage-groups'] = [sg.properties
for sg in storage_groups]
def ensure_set(params, check_mode):
"""
Identify the target CPC and ensure that the specified properties are set on
the target CPC.
Raises:
ParameterError: An issue with the module parameters.
Error: Other errors during processing.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
# Note: Defaults specified in argument_spec will be set in params dict
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['name']
faked_session = params.get('faked_session', None) # No default specified
changed = False
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
cpc.pull_full_properties()
update_props = process_properties(cpc, params)
if update_props:
if not check_mode:
cpc.update_properties(update_props)
# Some updates of CPC properties are not reflected in a new
# retrieval of properties until after a few seconds (usually the
# second retrieval).
# Therefore, we construct the modified result based upon the input
# changes, and not based upon newly retrieved properties.
cpc.properties.update(update_props)
changed = True
add_artificial_properties(cpc)
result = cpc.properties
return changed, result
finally:
session.logoff()
def facts(params, check_mode):
"""
Identify the target CPC and return facts about the target CPC and its
child resources.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['name']
faked_session = params.get('faked_session', None) # No default specified
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
cpc.pull_full_properties()
add_artificial_properties(cpc)
result = cpc.properties
return False, result
finally:
session.logoff()
def perform_task(params, check_mode):
"""
Perform the task for this module, dependent on the 'state' module
parameter.
If check_mode is True, check whether changes would occur, but don't
actually perform any changes.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
actions = {
"set": ensure_set,
"facts": facts,
}
return actions[params['state']](params, check_mode)
def main():
# The following definition of module input parameters must match the
# description of the options in the DOCUMENTATION string.
argument_spec = dict(
hmc_host=dict(required=True, type='str'),
hmc_auth=dict(required=True, type='dict', no_log=True),
name=dict(required=True, type='str'),
state=dict(required=True, type='str', choices=['set', 'facts']),
properties=dict(required=False, type='dict', default={}),
log_file=dict(required=False, type='str', default=None),
faked_session=dict(required=False, type='object'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
log_file = module.params['log_file']
log_init(LOGGER_NAME, log_file)
_params = dict(module.params)
del _params['hmc_auth']
LOGGER.debug("Module entry: params: {!r}".format(_params))
try:
changed, result = perform_task(module.params, module.check_mode)
except (Error, zhmcclient.Error) as exc:
# These exceptions are considered errors in the environment or in user
# input. They have a proper message that stands on its own, so we
# simply pass that message on and will not need a traceback.
msg = "{}: {}".format(exc.__class__.__name__, exc)
LOGGER.debug(
"Module exit (failure): msg: {!r}".
format(msg))
module.fail_json(msg=msg)
# Other exceptions are considered module errors and are handled by Ansible
# by showing the traceback.
LOGGER.debug(
"Module exit (success): changed: {!r}, cpc: {!r}".
format(changed, result))
module.exit_json(
changed=changed, cpc=result)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/zhmc_cpc.py | zhmc_cpc.py |
from __future__ import absolute_import, print_function
import logging
from ansible.module_utils.basic import AnsibleModule
import requests.packages.urllib3
import zhmcclient
from zhmc_ansible_modules.utils import log_init, Error, ParameterError, \
get_hmc_auth, get_session, to_unicode, process_normal_property
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_storage_group
version_added: "0.5"
short_description: Manages DPM storage groups (with "dpm-storage-management"
feature)
description:
- Gathers facts about a storage group associated with a CPC, including its
storage volumes and virtual storage resources.
- Creates, deletes and updates a storage group associated with a CPC.
notes:
- The CPC that is associated with the target storage group must be in the
Dynamic Partition Manager (DPM) operational mode and must have the
"dpm-storage-management" firmware feature enabled.
That feature has been introduced with the z14-ZR1 / Rockhopper II machine
generation.
- This module performs actions only against the Z HMC regarding the
definition of storage group objects and their attachment to partitions.
This module does not perform any actions against storage subsystems or
SAN switches.
- Attachment of a storage group to and from partitions is managed by the
Ansible module zhmc_storage_group_attachment.
- The Ansible module zhmc_hba is no longer used on CPCs that have the
"dpm-storage-management" feature enabled.
author:
- Andreas Maier (@andy-maier, [email protected])
- Andreas Scheuring (@scheuran, [email protected])
- Juergen Leopold (@leopoldjuergen, [email protected])
requirements:
- Network access to HMC
- zhmcclient >=0.20.0
- ansible >=2.2.0.0
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
required: true
hmc_auth:
description:
- The authentication credentials for the HMC.
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
required: true
password:
description:
- The password for authenticating with the HMC.
required: true
cpc_name:
description:
- The name of the CPC associated with the target storage group.
required: true
name:
description:
- The name of the target storage group.
required: true
state:
description:
- "The desired state for the target storage group:"
- "* C(absent): Ensures that the storage group does not exist. If the
storage group is currently attached to any partitions, the module will
fail."
- "* C(present): Ensures that the storage group exists and is associated
with the specified CPC, and has the specified properties. The
attachment state of the storage group to a partition is not changed."
- "* C(facts): Does not change anything on the storage group and returns
the storage group properties."
required: true
choices: ['absent', 'present', 'facts']
properties:
description:
- "Dictionary with desired properties for the storage group.
Used for C(state=present); ignored for C(state=absent|facts).
Dictionary key is the property name with underscores instead
of hyphens, and dictionary value is the property value in YAML syntax.
Integer properties may also be provided as decimal strings."
- "The possible input properties in this dictionary are the properties
defined as writeable in the data model for Storage Group resources
(where the property names contain underscores instead of hyphens),
with the following exceptions:"
- "* C(name): Cannot be specified because the name has already been
specified in the C(name) module parameter."
- "* C(type): Cannot be changed once the storage group exists."
- "Properties omitted in this dictionary will remain unchanged when the
storage group already exists, and will get the default value defined
in the data model for storage groups in the HMC API book when the
storage group is being created."
required: false
default: No properties.
expand:
description:
- "Boolean that controls whether the returned storage group contains
additional artificial properties that expand certain URI or name
properties to the full set of resource properties (see description of
return values of this module)."
required: false
type: bool
default: false
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
required: false
default: null
faked_session:
description:
- "A C(zhmcclient_mock.FakedSession) object that has a mocked HMC set up.
If provided, it will be used instead of connecting to a real HMC. This
is used for testing purposes only."
required: false
default: Real HMC will be used.
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about a storage group
zhmc_storage_group:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_storage_group_name }}"
state: facts
expand: true
register: sg1
- name: Ensure the storage group does not exist
zhmc_storage_group:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_storage_group_name }}"
state: absent
- name: Ensure the storage group exists
zhmc_storage_group:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_storage_group_name }}"
state: present
expand: true
properties:
description: "Example storage group 1"
type: fcp
shared: false
connectivity: 4
max-partitions: 1
register: sg1
"""
RETURN = """
storage_group:
description:
- "For C(state=absent), an empty dictionary."
- "For C(state=present|facts), a
dictionary with the resource properties of the target storage group,
plus additional artificial properties as described in the following
list items.
The dictionary keys are the exact property names as described in the
data model for the resource, i.e. they contain hyphens (-), not
underscores (_). The dictionary values are the property values using the
Python representations described in the documentation of the zhmcclient
Python package.
The additional artificial properties are:"
- "* C(attached-partition-names): List of partition names to which the
storage group is attached."
- "* C(cpc-name): Name of the CPC that is associated to this storage
group."
- "* C(candidate-adapter-ports) (only if expand was requested):
List of candidate adapter ports of the storage group. Each port is
represented as a dictionary of its properties; in addition each port has
an artificial property C(parent-adapter) which represents the adapter of
the port. Each adapter is represented as a dictionary of its
properties."
- "* C(storage-volumes) (only if expand was requested):
List of storage volumes of the storage group. Each storage volume is
represented as a dictionary of its properties."
- "* C(virtual-storage-resources) (only if expand was requested):
List of virtual storage resources of the storage group. Each virtual
storage resource is represented as a dictionary of its properties."
- "* C(attached-partitions) (only if expand was requested):
List of partitions to which the storage group is attached. Each
partition is represented as a dictionary of its properties."
- "* C(cpc) (only if expand was requested): The CPC that is associated to
this storage group. The CPC is represented as a dictionary of its
properties."
returned: success
type: dict
sample: |
C({
"name": "sg-1",
"description": "storage group #1",
...
})
"""
# Python logger name for this module
LOGGER_NAME = 'zhmc_storage_group'
LOGGER = logging.getLogger(LOGGER_NAME)
# Dictionary of properties of storage group resources, in this format:
# name: (allowed, create, update, update_while_active, eq_func, type_cast)
# where:
# name: Name of the property according to the data model, with hyphens
# replaced by underscores (this is how it is or would be specified in
# the 'properties' module parameter).
# allowed: Indicates whether it is allowed in the 'properties' module
# parameter.
# create: Indicates whether it can be specified for the "Create Storage
# Group" operation.
# update: Indicates whether it can be specified for the "Modify Storage
# Group Properties" operation (at all).
# update_while_active: Indicates whether it can be specified for the "Modify
# Storage Group Properties" operation while the storage group is attached
# to any partition. None means "not applicable" (used for update=False).
# eq_func: Equality test function for two values of the property; None means
# to use Python equality.
# type_cast: Type cast function for an input value of the property; None
# means to use it directly. This can be used for example to convert
# integers provided as strings by Ansible back into integers (that is a
# current deficiency of Ansible).
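# As an illustrative reading (not part of the module logic), the entry
#   'connectivity': (True, True, True, True, None, int)
# in the dictionary below means: the property is allowed in the 'properties'
# module parameter, can be specified on "Create Storage Group", can be
# updated (also while the storage group is attached), values are compared
# using Python equality, and input values are cast to int (e.g. when Ansible
# passes them as decimal strings).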
ZHMC_STORAGE_GROUP_PROPERTIES = {
# create-only properties:
'cpc-uri': (False, True, False, None, None, None), # auto-generated here
'type': (True, True, False, None, None, None),
# update-only properties: None
# non-data model properties:
'storage-volumes': (False, True, True, None, None, None),
# storage-volumes is a request-info object and is not part of the data
# model. Changes to storage volumes are performed via the
# zhmc_storage_volume.py module.
# create+update properties:
'name': (False, True, True, True, None, None),
# name: provided in 'name' module parm
'description': (True, True, True, True, None, to_unicode),
'shared': (True, True, True, True, None, None),
'connectivity': (True, True, True, True, None, int),
'max-partitions': (True, True, True, True, None, int),
'virtual-machine-count': (True, True, True, True, None, int),
'email-to-addresses': (True, True, True, True, None, None),
'email-cc-addresses': (True, True, True, True, None, None),
'email-insert': (True, True, True, True, None, None),
# read-only properties:
'object_uri': (False, False, False, None, None, None),
'object_id': (False, False, False, None, None, None),
'parent': (False, False, False, None, None, None),
'class': (False, False, False, None, None, None),
'fulfillment-state': (False, False, False, None, None, None),
'storage-volume-uris': (False, False, False, None, None, None),
# storage-volume-uris: updated via method
'virtual-storage-resource-uris': (False, False, False, None, None, None),
'active-connectivity': (False, False, False, None, None, None),
'active-max-partitions': (False, False, False, None, None, None),
'candidate-adapter-port-uris': (False, False, False, None, None, None),
# candidate-adapter-port-uris: updated via method
'unassigned-worldwide-port-names': (False, False, False, None, None, None),
}
def process_properties(cpc, storage_group, params):
"""
Process the properties specified in the 'properties' module parameter,
and return two dictionaries (create_props, update_props) that contain
the properties that can be created, and the properties that can be updated,
respectively. If the resource exists, the input property values are
compared with the existing resource property values and the returned set
of properties is the minimal set of properties that need to be changed.
- Underscores in the property names are translated into hyphens.
- The presence of read-only properties, invalid properties (i.e. not
defined in the data model for storage groups), and properties that are
not allowed because of restrictions or because they are auto-created from
an artificial property is surfaced by raising ParameterError.
- The properties resulting from handling artificial properties are
added to the returned dictionaries.
Parameters:
cpc (zhmcclient.Cpc): CPC with the partition to be updated, and
with the adapters to be used for the partition.
storage_group (zhmcclient.StorageGroup): Storage group object to be
updated with the full set of current properties, or `None` if it did
not previously exist.
params (dict): Module input parameters.
Returns:
tuple of (create_props, update_props), where:
* create_props: dict of properties for
zhmcclient.StorageGroupManager.create()
* update_props: dict of properties for
zhmcclient.StorageGroup.update_properties()
Raises:
ParameterError: An issue with the module parameters.
"""
create_props = {}
update_props = {}
# handle 'name' and 'cpc-uri' properties.
sg_name = to_unicode(params['name'])
if storage_group is None:
# SG does not exist yet.
create_props['name'] = sg_name
create_props['cpc-uri'] = cpc.uri
else:
# SG does already exist.
# We looked up the storage group by name, so we will never have to
# update the storage group name.
# Updates to the associated CPC are not possible.
sg_cpc = storage_group.cpc
if sg_cpc.uri != cpc.uri:
raise ParameterError(
"Storage group {!r} is not associated with the specified "
"CPC {!r}, but with CPC {!r}.".
format(sg_name, cpc.name, sg_cpc.name))
# handle the other properties
input_props = params.get('properties', None)
if input_props is None:
input_props = {}
for prop_name in input_props:
if prop_name not in ZHMC_STORAGE_GROUP_PROPERTIES:
raise ParameterError(
"Property {!r} is not defined in the data model for "
"storage groups.".format(prop_name))
allowed, create, update, update_while_active, eq_func, type_cast = \
ZHMC_STORAGE_GROUP_PROPERTIES[prop_name]
if not allowed:
raise ParameterError(
"Property {!r} is not allowed in the 'properties' module "
"parameter.".format(prop_name))
# Process a normal (= non-artificial) property
_create_props, _update_props, _stop = process_normal_property(
prop_name, ZHMC_STORAGE_GROUP_PROPERTIES, input_props,
storage_group)
create_props.update(_create_props)
update_props.update(_update_props)
assert _stop is False
return create_props, update_props
def add_artificial_properties(storage_group, expand):
"""
Add artificial properties to the storage_group object.
Upon return, the properties of the storage_group object have been
extended by these properties:
Regardless of expand:
* 'attached-partition-names': List of Partition names to which the storage
group is attached.
If expand is True:
* 'candidate-adapter-ports': List of Port objects, each of which is
represented as its dictionary of properties.
The Port properties are extended by these properties:
- 'parent-adapter': Adapter object of the port, represented as its
dictionary of properties.
* 'storage-volumes': List of StorageVolume objects, each of which is
represented as its dictionary of properties.
* 'virtual-storage-resources': List of VirtualStorageResource objects,
each of which is represented as its dictionary of properties.
* 'attached-partitions': List of Partition objects to which the storage
group is attached. Each Partition object is represented as a dictionary
of its properties.
"""
parts = storage_group.list_attached_partitions()
# List of attached partitions (just the names)
part_names_prop = list()
for part in parts:
part_names_prop.append(part.get_property('name'))
storage_group.properties['attached-partition-names'] = part_names_prop
if expand:
# Candidate adapter ports and their parent adapters (full set of props)
caps_prop = list()
for cap in storage_group.list_candidate_adapter_ports(
full_properties=True):
adapter = cap.manager.adapter
adapter.pull_full_properties()
cap.properties['parent-adapter'] = adapter.properties
caps_prop.append(cap.properties)
storage_group.properties['candidate-adapter-ports'] = caps_prop
# Storage volumes (full set of properties).
# Note: We create the storage volumes from the 'storage-volume-uris'
# property, because the 'List Storage Volumes of a Storage Group'
# operation returns an empty list for auto-discovered volumes.
svs_prop = list()
sv_uris = storage_group.get_property('storage-volume-uris')
for sv_uri in sv_uris:
sv = storage_group.storage_volumes.resource_object(sv_uri)
sv.pull_full_properties()
svs_prop.append(sv.properties)
storage_group.properties['storage-volumes'] = svs_prop
# Virtual storage resources (full set of properties).
vsrs_prop = list()
vsr_uris = storage_group.get_property('virtual-storage-resource-uris')
for vsr_uri in vsr_uris:
vsr = storage_group.virtual_storage_resources.resource_object(
vsr_uri)
vsr.pull_full_properties()
vsrs_prop.append(vsr.properties)
storage_group.properties['virtual-storage-resources'] = vsrs_prop
# List of attached partitions (full set of properties).
parts = storage_group.list_attached_partitions()
parts_prop = list()
for part in parts:
part.pull_full_properties()
parts_prop.append(part.properties)
storage_group.properties['attached-partitions'] = parts_prop
def ensure_present(params, check_mode):
"""
Ensure that the storage group exists and has the specified properties.
Storage volumes are not the subject of this function; they are handled by
the zhmc_storage_volume.py module.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
storage_group_name = params['name']
expand = params['expand']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
console = client.consoles.console
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
storage_group = console.storage_groups.find(
name=storage_group_name)
except zhmcclient.NotFound:
storage_group = None
if storage_group is None:
# It does not exist. Create it and update it if there are
# update-only properties.
if not check_mode:
create_props, update_props = \
process_properties(cpc, storage_group, params)
storage_group = console.storage_groups.create(
create_props)
update2_props = {}
for name in update_props:
if name not in create_props:
update2_props[name] = update_props[name]
if update2_props:
storage_group.update_properties(update2_props)
# We refresh the properties after the update, in case an
# input property value gets changed.
storage_group.pull_full_properties()
else:
# TODO: Show props in module result also in check mode.
pass
changed = True
else:
# It exists. Update its properties.
storage_group.pull_full_properties()
create_props, update_props = \
process_properties(cpc, storage_group, params)
assert not create_props, \
"Unexpected create_props: {!r}".format(create_props)
if update_props:
if not check_mode:
storage_group.update_properties(update_props)
# We refresh the properties after the update, in case an
# input property value gets changed.
storage_group.pull_full_properties()
else:
# TODO: Show updated props in mod.result also in chk.mode
pass
changed = True
if not check_mode:
assert storage_group
add_artificial_properties(storage_group, expand)
result = storage_group.properties
return changed, result
finally:
session.logoff()
def ensure_absent(params, check_mode):
"""
Ensure that the storage group does not exist.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
storage_group_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
console = client.consoles.console
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
storage_group = console.storage_groups.find(
name=storage_group_name)
except zhmcclient.NotFound:
return changed, result
sg_cpc = storage_group.cpc
if sg_cpc.uri != cpc.uri:
raise ParameterError(
"Storage group {!r} is not associated with the specified "
"CPC {!r}, but with CPC {!r}.".
format(storage_group_name, cpc.name, sg_cpc.name))
if not check_mode:
partitions = storage_group.list_attached_partitions()
for part in partitions:
# This will raise HTTPError(409) if the partition is in one of
# the transitional states ('starting', 'stopping').
part.detach_storage_group(storage_group)
storage_group.delete()
changed = True
return changed, result
finally:
session.logoff()
def facts(params, check_mode):
"""
Return facts about a storage group and its storage volumes and virtual
storage resources.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
storage_group_name = params['name']
expand = params['expand']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
# The default exception handling is sufficient for this code
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
console = client.consoles.console
cpc = client.cpcs.find(name=cpc_name)
storage_group = console.storage_groups.find(name=storage_group_name)
storage_group.pull_full_properties()
sg_cpc = storage_group.cpc
if sg_cpc.uri != cpc.uri:
raise ParameterError(
"Storage group {!r} is not associated with the specified "
"CPC {!r}, but with CPC {!r}.".
format(storage_group_name, cpc.name, sg_cpc.name))
add_artificial_properties(storage_group, expand)
result = storage_group.properties
return changed, result
finally:
session.logoff()
def perform_task(params, check_mode):
"""
Perform the task for this module, dependent on the 'state' module
parameter.
If check_mode is True, check whether changes would occur, but don't
actually perform any changes.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
actions = {
"absent": ensure_absent,
"present": ensure_present,
"facts": facts,
}
return actions[params['state']](params, check_mode)
def main():
# The following definition of module input parameters must match the
# description of the options in the DOCUMENTATION string.
argument_spec = dict(
hmc_host=dict(required=True, type='str'),
hmc_auth=dict(required=True, type='dict', no_log=True),
cpc_name=dict(required=True, type='str'),
name=dict(required=True, type='str'),
state=dict(required=True, type='str',
choices=['absent', 'present', 'facts']),
properties=dict(required=False, type='dict', default={}),
expand=dict(required=False, type='bool', default=False),
log_file=dict(required=False, type='str', default=None),
faked_session=dict(required=False, type='object'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
log_file = module.params['log_file']
log_init(LOGGER_NAME, log_file)
_params = dict(module.params)
del _params['hmc_auth']
LOGGER.debug("Module entry: params: {!r}".format(_params))
try:
changed, result = perform_task(module.params, module.check_mode)
except (Error, zhmcclient.Error) as exc:
# These exceptions are considered errors in the environment or in user
# input. They have a proper message that stands on its own, so we
# simply pass that message on and will not need a traceback.
msg = "{}: {}".format(exc.__class__.__name__, exc)
LOGGER.debug(
"Module exit (failure): msg: {!r}".
format(msg))
module.fail_json(msg=msg)
# Other exceptions are considered module errors and are handled by Ansible
# by showing the traceback.
LOGGER.debug(
"Module exit (success): changed: {!r}, cpc: {!r}".
format(changed, result))
module.exit_json(changed=changed, storage_group=result)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/zhmc_storage_group.py | zhmc_storage_group.py |
from __future__ import absolute_import, print_function
import logging
from collections import OrderedDict
from ansible.module_utils.basic import AnsibleModule
from operator import itemgetter
import requests.packages.urllib3
import zhmcclient
from zhmc_ansible_modules.utils import log_init, Error, ParameterError, \
StatusError, stop_partition, start_partition, \
wait_for_transition_completion, eq_hex, get_hmc_auth, get_session, \
to_unicode, process_normal_property
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_partition
version_added: "0.0"
short_description: Manages partitions
description:
- Gathers facts about a partition, including its child resources (HBAs, NICs
and virtual functions).
- Creates, updates, deletes, starts, and stops partitions in a CPC. The
child resources of the partition are managed by separate Ansible
modules.
- The targeted CPC must be in the Dynamic Partition Manager (DPM) operational
mode.
notes:
- See also Ansible modules zhmc_hba, zhmc_nic, zhmc_virtual_function.
author:
- Andreas Maier (@andy-maier, [email protected])
- Andreas Scheuring (@scheuran, [email protected])
- Juergen Leopold (@leopoldjuergen, [email protected])
requirements:
- Network access to HMC
- zhmcclient >=0.14.0
- ansible >=2.2.0.0
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
required: true
hmc_auth:
description:
- The authentication credentials for the HMC.
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
required: true
password:
description:
- The password for authenticating with the HMC.
required: true
cpc_name:
description:
- The name of the CPC with the target partition.
required: true
name:
description:
- The name of the target partition.
required: true
state:
description:
- "The desired state for the target partition:"
- "C(absent): Ensures that the partition does not exist in the specified
CPC."
- "C(stopped): Ensures that the partition exists in the specified CPC,
has the specified properties, and is in the 'stopped' status."
- "C(active): Ensures that the partition exists in the specified CPC,
has the specified properties, and is in the 'active' or 'degraded'
status."
- "C(facts): Does not change anything on the partition and returns
the partition properties and the properties of its child resources
(HBAs, NICs, and virtual functions)."
required: true
choices: ['absent', 'stopped', 'active', 'facts']
properties:
description:
- "Dictionary with input properties for the partition, for
C(state=stopped) and C(state=active). Key is the property name with
underscores instead of hyphens, and value is the property value in
YAML syntax. Integer properties may also be provided as decimal
strings. Will be ignored for C(state=absent)."
- "The possible input properties in this dictionary are the properties
defined as writeable in the data model for Partition resources
(where the property names contain underscores instead of hyphens),
with the following exceptions:"
- "* C(name): Cannot be specified because the name has already been
specified in the C(name) module parameter."
- "* C(type): Cannot be changed once the partition exists, because
updating it is not supported."
- "* C(boot_storage_device): Cannot be specified because this information
is specified using the artificial property C(boot_storage_hba_name)."
- "* C(boot_network_device): Cannot be specified because this information
is specified using the artificial property C(boot_network_nic_name)."
- "* C(boot_storage_hba_name): The name of the HBA whose URI is used to
construct C(boot_storage_device). Specifying it requires that the
partition exists."
- "* C(boot_network_nic_name): The name of the NIC whose URI is used to
construct C(boot_network_device). Specifying it requires that the
partition exists."
- "* C(crypto_configuration): The crypto configuration for the partition,
in the format of the C(crypto-configuration) property of the
partition (see HMC API book for details), with the exception that
adapters are specified with their names in field
C(crypto_adapter_names) instead of their URIs in field
C(crypto_adapter_uris). If the C(crypto_adapter_names) field is null,
all crypto adapters of the CPC will be used."
- "Properties omitted in this dictionary will remain unchanged when the
partition already exists, and will get the default value defined in
the data model for partitions in the HMC API book when the partition
is being created."
required: false
default: No input properties
expand_storage_groups:
description:
- "Boolean that controls whether the returned partition contains
an additional artificial property 'storage-groups' that is the list
of storage groups attached to the partition, with properties as
described for the zhmc_storage_group module with expand=true."
required: false
type: bool
default: false
expand_crypto_adapters:
description:
- "Boolean that controls whether the returned partition contains
an additional artificial property 'crypto-adapters' in its
'crypto-configuration' property that is the list
of crypto adapters attached to the partition, with properties as
described for the zhmc_adapter module."
required: false
type: bool
default: false
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
required: false
default: null
faked_session:
description:
- "A C(zhmcclient_mock.FakedSession) object that has a mocked HMC set up.
If provided, it will be used instead of connecting to a real HMC. This
is used for testing purposes only."
required: false
default: Real HMC will be used.
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
# Because configuring LUN masking in the SAN requires the host WWPN, and the
# host WWPN is automatically assigned and will be known only after an HBA has
# been added to the partition, the partition needs to be created in stopped
# state. Also, because the HBA has not yet been created, the boot
# configuration cannot be done yet:
- name: Ensure the partition exists and is stopped
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_partition_name }}"
state: stopped
properties:
description: "zhmc Ansible modules: Example partition 1"
ifl_processors: 2
initial_memory: 1024
maximum_memory: 1024
register: part1
# After an HBA has been added (see Ansible module zhmc_hba), and LUN masking
# has been configured in the SAN, and a bootable image is available at the
# configured LUN and target WWPN, the partition can be configured for boot
# from the FCP LUN and can be started:
- name: Configure boot device and start the partition
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_partition_name }}"
state: active
properties:
boot_device: storage-adapter
boot_storage_hba_name: hba1
boot_logical_unit_number: 00000000001
boot_world_wide_port_name: abcdefabcdef
register: part1
- name: Ensure the partition does not exist
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_partition_name }}"
state: absent
- name: Define crypto configuration
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_partition_name }}"
state: active
properties:
crypto_configuration:
crypto_adapter_names:
- adapter1
- adapter2
crypto_domain_configurations:
- domain_index: 0
access_mode: control-usage
- domain_index: 1
access_mode: control
register: part1
- name: Gather facts about a partition
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
name: "{{ my_partition_name }}"
state: facts
expand_storage_groups: true
expand_crypto_adapters: true
register: part1
"""
RETURN = """
partition:
description:
- "For C(state=absent), an empty dictionary."
- "For C(state=stopped) and C(state=active), a dictionary with the resource
properties of the partition (after changes, if any). The dictionary
keys are the exact property names as described in the data model for the
resource, i.e. they contain hyphens (-), not underscores (_). The
dictionary values are the property values using the Python
representations described in the documentation of the zhmcclient Python
package."
- "For C(state=facts), a dictionary with the resource properties of the
partition, including its child resources (HBAs, NICs, and virtual
functions). The dictionary keys are the exact property names as
described in the data model for the resource, i.e. they contain hyphens
(-), not underscores (_). The dictionary values are the property values
using the Python representations described in the documentation of the
zhmcclient Python package. The properties of the child resources are
represented in partition properties named 'hbas', 'nics', and
'virtual-functions', respectively. The NIC properties are amended by
artificial properties 'adapter-name', 'adapter-port', 'adapter-id'."
returned: success
type: dict
sample: |
C({
"name": "part-1",
"description": "partition #1",
"status": "active",
"boot-device": "storage-adapter",
...
})
"""
# Python logger name for this module
LOGGER_NAME = 'zhmc_partition'
LOGGER = logging.getLogger(LOGGER_NAME)
# Dictionary of properties of partition resources, in this format:
# name: (allowed, create, update, update_while_active, eq_func, type_cast)
# where:
# name: Name of the property according to the data model, with hyphens
# replaced by underscores (this is how it is or would be specified in
# the 'properties' module parameter).
# allowed: Indicates whether it is allowed in the 'properties' module
# parameter.
# create: Indicates whether it can be specified for the "Create Partition"
# operation.
# update: Indicates whether it can be specified for the "Update Partition
# Properties" operation (at all).
# update_while_active: Indicates whether it can be specified for the "Update
# Partition Properties" operation while the partition is active. None means
# "not applicable" (i.e. update=False).
# eq_func: Equality test function for two values of the property; None means
# to use Python equality.
# type_cast: Type cast function for an input value of the property; None
# means to use it directly. This can be used for example to convert
# integers provided as strings by Ansible back into integers (that is a
# current deficiency of Ansible).
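# As an illustrative reading (not part of the module logic), the entry
#   'boot_logical_unit_number': (True, False, True, True, eq_hex, None)
# in the dictionary below means: the property is allowed in the 'properties'
# module parameter, cannot be specified on "Create Partition", can be
# updated (also while the partition is active), and values are compared
# using eq_hex(), i.e. as hexadecimal values rather than by plain string
# equality.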
ZHMC_PARTITION_PROPERTIES = {
# create-only properties:
'type': (True, True, False, None, None, None), # cannot change type
# update-only properties:
'boot_network_device': (
False, False, True, True, None, None), # via boot_network_nic_name
'boot_network_nic_name': (
True, False, True, True, None, to_unicode), # artificial property
'boot_storage_device': (
False, False, True, True, None, None), # via boot_storage_hba_name
'boot_storage_hba_name': (
True, False, True, True, None, to_unicode), # artificial property
'crypto_configuration': (
True, False, False, None, None,
None), # Contains artificial properties, type_cast ignored
'acceptable_status': (True, False, True, True, None, None),
'processor_management_enabled': (True, False, True, True, None, None),
'ifl_absolute_processor_capping': (True, False, True, True, None, None),
'ifl_absolute_processor_capping_value': (
True, False, True, True, None, float),
'ifl_processing_weight_capped': (True, False, True, True, None, None),
'minimum_ifl_processing_weight': (True, False, True, True, None, int),
'maximum_ifl_processing_weight': (True, False, True, True, None, int),
'initial_ifl_processing_weight': (True, False, True, True, None, int),
'cp_absolute_processor_capping': (True, False, True, True, None, None),
'cp_absolute_processor_capping_value': (
True, False, True, True, None, float),
'cp_processing_weight_capped': (True, False, True, True, None, None),
'minimum_cp_processing_weight': (True, False, True, True, None, int),
'maximum_cp_processing_weight': (True, False, True, True, None, int),
'initial_cp_processing_weight': (True, False, True, True, None, int),
'boot_logical_unit_number': (True, False, True, True, eq_hex, None),
'boot_world_wide_port_name': (True, False, True, True, eq_hex, None),
'boot_os_specific_parameters': (True, False, True, True, None, to_unicode),
'boot_iso_ins_file': (True, False, True, True, None, to_unicode),
'ssc_boot_selection': (True, False, True, True, None, None),
# create+update properties:
'name': (
False, True, True, True, None, None), # provided in 'name' module parm
'description': (True, True, True, True, None, to_unicode),
'short_name': (True, True, True, False, None, None),
'partition_id': (True, True, True, False, None, None),
'autogenerate_partition_id': (True, True, True, False, None, None),
'ifl_processors': (True, True, True, True, None, int),
'cp_processors': (True, True, True, True, None, int),
'processor_mode': (True, True, True, False, None, None),
'initial_memory': (True, True, True, True, None, int),
'maximum_memory': (True, True, True, False, None, int),
'reserve_resources': (True, True, True, True, None, None),
'boot_device': (True, True, True, True, None, None),
'boot_timeout': (True, True, True, True, None, int),
'boot_ftp_host': (True, True, True, True, None, to_unicode),
'boot_ftp_username': (True, True, True, True, None, to_unicode),
'boot_ftp_password': (True, True, True, True, None, to_unicode),
'boot_ftp_insfile': (True, True, True, True, None, to_unicode),
'boot_removable_media': (True, True, True, True, None, to_unicode),
'boot_removable_media_type': (True, True, True, True, None, None),
'boot_configuration_selector': (True, True, True, True, None, int),
'boot_record_lba': (True, True, True, True, None, None),
'access_global_performance_data': (True, True, True, True, None, None),
'permit_cross_partition_commands': (True, True, True, True, None, None),
'access_basic_counter_set': (True, True, True, True, None, None),
'access_problem_state_counter_set': (True, True, True, True, None, None),
'access_crypto_activity_counter_set': (True, True, True, True, None, None),
'access_extended_counter_set': (True, True, True, True, None, None),
'access_coprocessor_group_set': (True, True, True, True, None, None),
'access_basic_sampling': (True, True, True, True, None, None),
'access_diagnostic_sampling': (True, True, True, True, None, None),
'permit_des_key_import_functions': (True, True, True, True, None, None),
'permit_aes_key_import_functions': (True, True, True, True, None, None),
'ssc_host_name': (True, True, True, True, None, to_unicode),
'ssc_ipv4_gateway': (True, True, True, True, None, to_unicode),
'ssc_dns_servers': (True, True, True, True, None, to_unicode),
'ssc_master_userid': (True, True, True, True, None, to_unicode),
'ssc_master_pw': (True, True, True, True, None, to_unicode),
# read-only properties:
'object_uri': (False, False, False, None, None, None),
'object_id': (False, False, False, None, None, None),
'parent': (False, False, False, None, None, None),
'class': (False, False, False, None, None, None),
'status': (False, False, False, None, None, None),
'has_unacceptable_status': (False, False, False, None, None, None),
'is_locked': (False, False, False, None, None, None),
'os_name': (False, False, False, None, None, None),
'os_type': (False, False, False, None, None, None),
'os_version': (False, False, False, None, None, None),
'degraded_adapters': (False, False, False, None, None, None),
'current_ifl_processing_weight': (False, False, False, None, None, None),
'current_cp_processing_weight': (False, False, False, None, None, None),
'reserved_memory': (False, False, False, None, None, None),
'auto_start': (False, False, False, None, None, None),
'boot_iso_image_name': (False, False, False, None, None, None),
'threads_per_processor': (False, False, False, None, None, None),
'virtual_function_uris': (False, False, False, None, None, None),
'nic_uris': (False, False, False, None, None, None),
'hba_uris': (False, False, False, None, None, None),
}
def process_properties(cpc, partition, params):
"""
Process the properties specified in the 'properties' module parameter,
and return two dictionaries (create_props, update_props) that contain
the properties that can be created, and the properties that can be updated,
respectively. If the resource exists, the input property values are
compared with the existing resource property values and the returned set
of properties is the minimal set of properties that need to be changed.
- Underscores in the property names are translated into hyphens.
- The presence of read-only properties, invalid properties (i.e. not
defined in the data model for partitions), and properties that are not
allowed because of restrictions or because they are auto-created from
an artificial property is surfaced by raising ParameterError.
- The properties resulting from handling artificial properties are
added to the returned dictionaries.
Parameters:
cpc (zhmcclient.Cpc): CPC with the partition to be updated, and
with the adapters to be used for the partition.
partition (zhmcclient.Partition): Partition to be updated with the full
set of current properties, or `None` if it did not previously exist.
params (dict): Module input parameters.
Returns:
tuple of (create_props, update_props, stop, crypto_changes), where:
* create_props: dict of properties for
zhmcclient.PartitionManager.create()
* update_props: dict of properties for
zhmcclient.Partition.update_properties()
* stop (bool): Indicates whether some update properties require the
partition to be stopped when doing the update.
* crypto_changes (tuple): Changes to the crypto configuration if any
(or `None` if no changes were specified), as a tuple of:
* remove_adapters: List of Adapter objects to be removed
* remove_domain_indexes: List of domain indexes to be removed
* add_adapters: List of Adapter objects to be added
* add_domain_configs: List of domain configs to be added (dict of
'domain-index', 'access-mode')
* change_domain_configs: List of domain configs for changing the
access mode of existing domain indexes.
Raises:
ParameterError: An issue with the module parameters.
"""
create_props = {}
update_props = {}
stop = False
crypto_changes = None
# handle 'name' property
part_name = to_unicode(params['name'])
create_props['name'] = part_name
# We looked up the partition by name, so we will never have to update
# the partition name
# handle the other properties
input_props = params.get('properties', {})
if input_props is None:
input_props = {}
for prop_name in input_props:
if prop_name not in ZHMC_PARTITION_PROPERTIES:
raise ParameterError(
"Property {!r} is not defined in the data model for "
"partitions.".format(prop_name))
allowed, create, update, update_while_active, eq_func, type_cast = \
ZHMC_PARTITION_PROPERTIES[prop_name]
if not allowed:
raise ParameterError(
"Property {!r} is not allowed in the 'properties' module "
"parameter.".format(prop_name))
if prop_name == 'boot_storage_hba_name':
# Process this artificial property
if not partition:
raise ParameterError(
"Artificial property {!r} can only be specified when the "
"partition previously exists.".format(prop_name))
if partition.hbas is None:
raise ParameterError(
"Artificial property {!r} can only be specified when the "
"'dpm-storage-management' feature is disabled.".
format(prop_name))
hba_name = input_props[prop_name]
if type_cast:
hba_name = type_cast(hba_name)
try:
hba = partition.hbas.find(name=hba_name)
except zhmcclient.NotFound:
raise ParameterError(
"Artificial property {!r} does not name an existing HBA: "
"{!r}".format(prop_name, hba_name))
hmc_prop_name = 'boot-storage-device'
if partition.properties.get(hmc_prop_name) != hba.uri:
update_props[hmc_prop_name] = hba.uri
assert update_while_active
elif prop_name == 'boot_network_nic_name':
# Process this artificial property
if not partition:
raise ParameterError(
"Artificial property {!r} can only be specified when the "
"partition previously exists.".format(prop_name))
nic_name = input_props[prop_name]
if type_cast:
nic_name = type_cast(nic_name)
try:
nic = partition.nics.find(name=nic_name)
except zhmcclient.NotFound:
raise ParameterError(
"Artificial property {!r} does not name an existing NIC: "
"{!r}".format(prop_name, nic_name))
hmc_prop_name = 'boot-network-device'
if partition.properties.get(hmc_prop_name) != nic.uri:
update_props[hmc_prop_name] = nic.uri
assert update_while_active
elif prop_name == 'crypto_configuration':
# Process this artificial property
crypto_config = input_props[prop_name]
if not isinstance(crypto_config, dict):
raise ParameterError(
"Artificial property {!r} is not a dictionary: {!r}.".
format(prop_name, crypto_config))
if partition:
hmc_prop_name = 'crypto-configuration'
current_crypto_config = partition.properties.get(hmc_prop_name)
else:
current_crypto_config = None
# Determine adapter changes
try:
adapter_field_name = 'crypto_adapter_names'
adapter_names = crypto_config[adapter_field_name]
except KeyError:
raise ParameterError(
"Artificial property {!r} does not have required field "
"{!r}.".format(prop_name, adapter_field_name))
adapter_uris = set()
adapter_dict = {} # adapters by uri
if adapter_names is None:
# Default: Use all crypto adapters of the CPC
adapters = cpc.adapters.findall(type='crypto')
for adapter in adapters:
adapter_dict[adapter.uri] = adapter
adapter_uris.add(adapter.uri)
else:
for adapter_name in adapter_names:
try:
adapter = cpc.adapters.find(name=adapter_name,
type='crypto')
except zhmcclient.NotFound:
raise ParameterError(
"Artificial property {!r} does not specify the "
"name of an existing crypto adapter in its {!r} "
"field: {!r}".
format(prop_name, adapter_field_name,
adapter_name))
adapter_dict[adapter.uri] = adapter
adapter_uris.add(adapter.uri)
if current_crypto_config:
current_adapter_uris = set(
current_crypto_config['crypto-adapter-uris'])
else:
current_adapter_uris = set()
if adapter_uris != current_adapter_uris:
add_adapter_uris = adapter_uris - current_adapter_uris
# Result: List of adapters to be added:
add_adapters = [adapter_dict[uri] for uri in add_adapter_uris]
remove_adapter_uris = current_adapter_uris - adapter_uris
for uri in remove_adapter_uris:
adapter = cpc.adapters.find(**{'object-uri': uri})
# We assume the current crypto config lists only valid URIs
adapter_dict[adapter.uri] = adapter
# Result: List of adapters to be removed:
remove_adapters = \
[adapter_dict[uri] for uri in remove_adapter_uris]
else:
# Result: List of adapters to be added:
add_adapters = []
# Result: List of adapters to be removed:
remove_adapters = []
# Determine domain config changes.
try:
config_field_name = 'crypto_domain_configurations'
domain_configs = crypto_config[config_field_name]
except KeyError:
raise ParameterError(
"Artificial property {!r} does not have required field "
"{!r}.".format(prop_name, config_field_name))
di_field_name = 'domain_index'
am_field_name = 'access_mode'
domain_indexes = set()
for dc in domain_configs:
try:
# Convert to integer in case the domain index is provided
# as a string:
domain_index = int(dc[di_field_name])
except KeyError:
raise ParameterError(
"Artificial property {!r} does not have required "
"sub-field {!r} in one of its {!r} fields.".
format(prop_name, di_field_name, config_field_name))
domain_indexes.add(domain_index)
current_access_mode_dict = {} # dict: acc.mode by dom.index
if current_crypto_config:
current_domain_configs = \
current_crypto_config['crypto-domain-configurations']
di_prop_name = 'domain-index'
am_prop_name = 'access-mode'
for dc in current_domain_configs:
# Here the domain index is always an integer because it is
# returned from the HMC that way, so no type cast needed.
current_access_mode_dict[dc[di_prop_name]] = \
dc[am_prop_name]
current_domain_indexes = \
set([di for di in current_access_mode_dict])
# Result: List of domain indexes to be removed:
remove_domain_indexes = \
list(current_domain_indexes - domain_indexes)
# Building result: List of domain configs to be added:
add_domain_configs = []
# Building result: List of domain configs to be changed:
change_domain_configs = []
for dc in domain_configs:
# Convert to integer in case the domain index is provided
# as a string:
domain_index = int(dc[di_field_name])
try:
access_mode = dc[am_field_name]
except KeyError:
raise ParameterError(
"Artificial property {!r} does not have required "
"sub-field {!r} in one of its {!r} fields.".
format(prop_name, am_field_name, config_field_name))
hmc_domain_config = {
'domain-index': domain_index,
'access-mode': access_mode,
}
if domain_index not in current_access_mode_dict:
# Domain is not included yet
add_domain_configs.append(hmc_domain_config)
elif access_mode != current_access_mode_dict[domain_index]:
# Domain is included but access mode needs to be changed
change_domain_configs.append(hmc_domain_config)
crypto_changes = (remove_adapters, remove_domain_indexes,
add_adapters, add_domain_configs,
change_domain_configs)
else:
# Process a normal (= non-artificial) property
if prop_name == 'ssc_ipv4_gateway':
# Undo conversion from None to empty string in Ansible
if input_props[prop_name] == '':
input_props[prop_name] = None
_create_props, _update_props, _stop = process_normal_property(
prop_name, ZHMC_PARTITION_PROPERTIES, input_props, partition)
create_props.update(_create_props)
update_props.update(_update_props)
if _stop:
stop = True
return create_props, update_props, stop, crypto_changes
def change_crypto_config(partition, crypto_changes, check_mode):
"""
Change the crypto configuration of the partition as specified.
Returns whether the crypto configuration has or would have changed.
"""
remove_adapters, remove_domain_indexes, \
add_adapters, add_domain_configs, \
change_domain_configs = crypto_changes
changed = False
# We process additions first, in order to avoid
# HTTPError 409,111 (At least one 'usage' required).
if add_adapters or add_domain_configs:
if not check_mode:
partition.increase_crypto_config(add_adapters,
add_domain_configs)
changed = True
if change_domain_configs:
# We process changes that set access mode 'control-usage' first,
# in order to avoid HTTPError 409,111 (At least one 'usage' required).
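        # Illustrative example: 'control-usage' sorts after 'control'
        # lexically, so reverse sorting by 'access-mode' processes the
        # 'control-usage' changes first.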
for domain_config in sorted(change_domain_configs,
key=itemgetter('access-mode'),
reverse=True):
domain_index = domain_config['domain-index']
access_mode = domain_config['access-mode']
if not check_mode:
partition.change_crypto_domain_config(domain_index,
access_mode)
changed = True
if remove_adapters or remove_domain_indexes:
if not check_mode:
partition.decrease_crypto_config(remove_adapters,
remove_domain_indexes)
changed = True
return changed
def add_artificial_properties(
partition, expand_storage_groups, expand_crypto_adapters):
"""
Add artificial properties to the partition object.
Upon return, the properties of the partition object have been
extended by these artificial properties:
* 'hbas': List of Hba objects of the partition.
* 'nics': List of Nic objects of the partition, with their properties
and these artificial properties:
* 'adapter-name'
* 'adapter-port'
* 'adapter-id'
* 'virtual-functions': List of VirtualFunction objects of the partition.
and if expand_storage_groups is True:
* 'storage-groups': List of StorageGroup objects representing the
storage groups attached to the partition, with their properties
and these artificial properties:
* 'candidate-adapter-ports': List of Port objects representing the
candidate adapter ports of the storage group, with their properties
and these artificial properties:
- 'parent-adapter': Adapter object of the port.
* 'storage-volumes': List of StorageVolume objects of the storage
group.
* 'virtual-storage-resources': List of VirtualStorageResource objects
of the storage group.
and if expand_crypto_adapters is True:
* 'crypto-adapters' in 'crypto-configuration': List of Adapter objects
representing the crypto adapters assigned to the partition.
"""
cpc = partition.manager.cpc
console = cpc.manager.console
session = cpc.manager.client.session
# Get the HBA child elements of the partition
hbas_prop = list()
if partition.hbas is not None:
for hba in partition.hbas.list(full_properties=True):
hbas_prop.append(hba.properties)
partition.properties['hbas'] = hbas_prop
# Get the NIC child elements of the partition
nics_prop = list()
for nic in partition.nics.list(full_properties=True):
nic_props = OrderedDict()
nic_props.update(nic.properties)
# Add artificial properties adapter-name/-port/-id:
vswitch_uri = nic.prop("virtual-switch-uri", None)
if vswitch_uri:
# OSA, Hipersockets
vswitch = cpc.virtual_switches.find(**{'object-uri': vswitch_uri})
adapter_uri = vswitch.get_property('backing-adapter-uri')
adapter_port = vswitch.get_property('port')
adapter = cpc.adapters.find(**{'object-uri': adapter_uri})
nic_props['adapter-name'] = adapter.name
nic_props['adapter-port'] = adapter_port
nic_props['adapter-id'] = adapter.get_property('adapter-id')
else:
# RoCE, CNA
port_uri = nic.prop("network-adapter-port-uri", None)
assert port_uri
port_props = session.get(port_uri)
adapter_uri = port_props['parent']
adapter = cpc.adapters.find(**{'object-uri': adapter_uri})
nic_props['adapter-name'] = adapter.name
nic_props['adapter-port'] = port_props['index']
nic_props['adapter-id'] = adapter.get_property('adapter-id')
nics_prop.append(nic_props)
partition.properties['nics'] = nics_prop
# Get the VF child elements of the partition
vf_prop = list()
for vf in partition.virtual_functions.list(full_properties=True):
vf_prop.append(vf.properties)
partition.properties['virtual-functions'] = vf_prop
if expand_storage_groups:
sg_prop = list()
for sg_uri in partition.properties['storage-group-uris']:
storage_group = console.storage_groups.resource_object(sg_uri)
storage_group.pull_full_properties()
sg_prop.append(storage_group.properties)
# Candidate adapter ports and their adapters (full set of props)
caps_prop = list()
for cap in storage_group.list_candidate_adapter_ports(
full_properties=True):
adapter = cap.manager.adapter
adapter.pull_full_properties()
cap.properties['parent-adapter'] = adapter.properties
caps_prop.append(cap.properties)
storage_group.properties['candidate-adapter-ports'] = caps_prop
# Storage volumes (full set of properties).
# Note: We create the storage volumes from the
# 'storage-volume-uris' property, because the 'List Storage
# Volumes of a Storage Group' operation returns an empty list for
# auto-discovered volumes.
svs_prop = list()
sv_uris = storage_group.get_property('storage-volume-uris')
for sv_uri in sv_uris:
sv = storage_group.storage_volumes.resource_object(sv_uri)
sv.pull_full_properties()
svs_prop.append(sv.properties)
storage_group.properties['storage-volumes'] = svs_prop
# Virtual storage resources (full set of properties).
vsrs_prop = list()
vsr_uris = storage_group.get_property(
'virtual-storage-resource-uris')
for vsr_uri in vsr_uris:
vsr = storage_group.virtual_storage_resources.resource_object(
vsr_uri)
vsr.pull_full_properties()
vsrs_prop.append(vsr.properties)
storage_group.properties['virtual-storage-resources'] = vsrs_prop
partition.properties['storage-groups'] = sg_prop
if expand_crypto_adapters:
cc = partition.properties['crypto-configuration']
if cc:
ca_prop = list()
for ca_uri in cc['crypto-adapter-uris']:
ca = cpc.adapters.resource_object(ca_uri)
ca.pull_full_properties()
ca_prop.append(ca.properties)
cc['crypto-adapters'] = ca_prop
def ensure_active(params, check_mode):
"""
Ensure that the partition exists, is active or degraded, and has the
specified properties.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['name']
expand_storage_groups = params['expand_storage_groups']
expand_crypto_adapters = params['expand_crypto_adapters']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
partition = cpc.partitions.find(name=partition_name)
partition.pull_full_properties()
except zhmcclient.NotFound:
partition = None
if not partition:
# It does not exist. Create it and update it if there are
# update-only properties.
if not check_mode:
create_props, update_props, stop, crypto_changes = \
process_properties(cpc, partition, params)
partition = cpc.partitions.create(create_props)
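                # Apply update-only properties (those that cannot be set at
                # creation time) in a second step after creating the
                # partition.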
update2_props = {}
for name in update_props:
if name not in create_props:
update2_props[name] = update_props[name]
if update2_props:
partition.update_properties(update2_props)
# We refresh the properties after the update, in case an
# input property value gets changed (for example, the
# partition does that with memory properties).
partition.pull_full_properties()
if crypto_changes:
change_crypto_config(partition, crypto_changes, check_mode)
else:
# TODO: Show props in module result also in check mode.
pass
changed = True
else:
# It exists. Stop if needed due to property update requirements,
# or wait for an updateable partition status, and update its
# properties.
create_props, update_props, stop, crypto_changes = \
process_properties(cpc, partition, params)
if update_props:
if not check_mode:
if stop:
stop_partition(partition, check_mode)
else:
wait_for_transition_completion(partition)
partition.update_properties(update_props)
# We refresh the properties after the update, in case an
# input property value gets changed (for example, the
# partition does that with memory properties).
partition.pull_full_properties()
else:
# TODO: Show updated props in mod.result also in chk.mode
pass
changed = True
if crypto_changes:
changed |= change_crypto_config(partition, crypto_changes,
check_mode)
if partition:
changed |= start_partition(partition, check_mode)
if partition and not check_mode:
partition.pull_full_properties()
status = partition.get_property('status')
if status not in ('active', 'degraded'):
raise StatusError(
"Could not get partition {!r} into an active state, "
"status is: {!r}".format(partition.name, status))
if partition:
add_artificial_properties(
partition, expand_storage_groups, expand_crypto_adapters)
result = partition.properties
return changed, result
finally:
session.logoff()
def ensure_stopped(params, check_mode):
"""
Ensure that the partition exists, is stopped, and has the specified
properties.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['name']
expand_storage_groups = params['expand_storage_groups']
expand_crypto_adapters = params['expand_crypto_adapters']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
partition = cpc.partitions.find(name=partition_name)
partition.pull_full_properties()
except zhmcclient.NotFound:
partition = None
if not partition:
# It does not exist. Create it and update it if there are
# update-only properties.
if not check_mode:
create_props, update_props, stop, crypto_changes = \
process_properties(cpc, partition, params)
partition = cpc.partitions.create(create_props)
update2_props = {}
for name in update_props:
if name not in create_props:
update2_props[name] = update_props[name]
if update2_props:
partition.update_properties(update2_props)
if crypto_changes:
change_crypto_config(partition, crypto_changes, check_mode)
changed = True
else:
# It exists. Stop it and update its properties.
create_props, update_props, stop, crypto_changes = \
process_properties(cpc, partition, params)
changed |= stop_partition(partition, check_mode)
if update_props:
if not check_mode:
partition.update_properties(update_props)
changed = True
if crypto_changes:
changed |= change_crypto_config(partition, crypto_changes,
check_mode)
if partition and not check_mode:
partition.pull_full_properties()
status = partition.get_property('status')
            if status != 'stopped':
raise StatusError(
"Could not get partition {!r} into a stopped state, "
"status is: {!r}".format(partition.name, status))
if partition:
add_artificial_properties(
partition, expand_storage_groups, expand_crypto_adapters)
result = partition.properties
return changed, result
finally:
session.logoff()
def ensure_absent(params, check_mode):
"""
Ensure that the partition does not exist.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
partition = cpc.partitions.find(name=partition_name)
except zhmcclient.NotFound:
return changed, result
if not check_mode:
stop_partition(partition, check_mode)
partition.delete()
changed = True
return changed, result
finally:
session.logoff()
def facts(params, check_mode):
"""
Return partition facts.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['name']
expand_storage_groups = params['expand_storage_groups']
expand_crypto_adapters = params['expand_crypto_adapters']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
# The default exception handling is sufficient for this code
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
partition = cpc.partitions.find(name=partition_name)
partition.pull_full_properties()
add_artificial_properties(
partition, expand_storage_groups, expand_crypto_adapters)
result = partition.properties
return changed, result
finally:
session.logoff()
def perform_task(params, check_mode):
"""
Perform the task for this module, dependent on the 'state' module
parameter.
If check_mode is True, check whether changes would occur, but don't
actually perform any changes.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
actions = {
"absent": ensure_absent,
"active": ensure_active,
"stopped": ensure_stopped,
"facts": facts,
}
return actions[params['state']](params, check_mode)
def main():
# The following definition of module input parameters must match the
# description of the options in the DOCUMENTATION string.
argument_spec = dict(
hmc_host=dict(required=True, type='str'),
hmc_auth=dict(required=True, type='dict', no_log=True),
cpc_name=dict(required=True, type='str'),
name=dict(required=True, type='str'),
state=dict(required=True, type='str',
choices=['absent', 'stopped', 'active', 'facts']),
properties=dict(required=False, type='dict', default={}),
expand_storage_groups=dict(required=False, type='bool', default=False),
expand_crypto_adapters=dict(required=False, type='bool',
default=False),
log_file=dict(required=False, type='str', default=None),
faked_session=dict(required=False, type='object'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
log_file = module.params['log_file']
log_init(LOGGER_NAME, log_file)
_params = dict(module.params)
del _params['hmc_auth']
LOGGER.debug("Module entry: params: {!r}".format(_params))
try:
changed, result = perform_task(module.params, module.check_mode)
except (Error, zhmcclient.Error) as exc:
# These exceptions are considered errors in the environment or in user
# input. They have a proper message that stands on its own, so we
# simply pass that message on and will not need a traceback.
msg = "{}: {}".format(exc.__class__.__name__, exc)
LOGGER.debug(
"Module exit (failure): msg: {!r}".
format(msg))
module.fail_json(msg=msg)
# Other exceptions are considered module errors and are handled by Ansible
# by showing the traceback.
LOGGER.debug(
"Module exit (success): changed: {!r}, cpc: {!r}".
format(changed, result))
module.exit_json(changed=changed, partition=result)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/zhmc_partition.py | zhmc_partition.py |
from __future__ import absolute_import, print_function
import logging
from ansible.module_utils.basic import AnsibleModule
import requests.packages.urllib3
import zhmcclient
from zhmc_ansible_modules.utils import log_init, Error, ParameterError, \
eq_hex, get_hmc_auth, get_session, to_unicode, process_normal_property
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_storage_volume
version_added: "0.5"
short_description: Manages DPM storage volumes in existing storage groups (with
"dpm-storage-management" feature)
description:
- Gathers facts about a storage volume in a storage group associated with a
CPC.
- Creates, deletes and updates a storage volume in a storage group associated
with a CPC.
notes:
- The CPC that is associated with the storage group must be in the
Dynamic Partition Manager (DPM) operational mode and must have the
"dpm-storage-management" firmware feature enabled.
That feature has been introduced with the z14-ZR1 / Rockhopper II machine
generation.
- This module performs actions only against the Z HMC regarding the
definition of storage volume objects within storage group objects.
This module does not perform any actions against storage subsystems or
SAN switches.
- The Ansible module zhmc_hba is no longer used on CPCs that have the
"dpm-storage-management" feature enabled.
author:
- Andreas Maier (@andy-maier, [email protected])
- Andreas Scheuring (@scheuran, [email protected])
- Juergen Leopold (@leopoldjuergen, [email protected])
requirements:
- Network access to HMC
- zhmcclient >=0.20.0
- ansible >=2.2.0.0
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
required: true
hmc_auth:
description:
- The authentication credentials for the HMC.
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
required: true
password:
description:
- The password for authenticating with the HMC.
required: true
cpc_name:
description:
- The name of the CPC associated with the storage group containing the
target storage volume.
required: true
storage_group_name:
description:
- The name of the storage group containing the target storage volume.
required: true
name:
description:
- The name of the target storage volume.
required: true
state:
description:
- "The desired state for the target storage volume:"
- "* C(absent): Ensures that the storage volume does not exist in the
specified storage group."
- "* C(present): Ensures that the storage volume exists in the specified
storage group, and has the specified properties."
- "* C(facts): Does not change anything on the storage volume and returns
the storage volume properties."
required: true
choices: ['absent', 'present', 'facts']
properties:
description:
- "Dictionary with desired properties for the storage volume.
Used for C(state=present); ignored for C(state=absent|facts).
Dictionary key is the property name with underscores instead
of hyphens, and dictionary value is the property value in YAML syntax.
Integer properties may also be provided as decimal strings."
- "The possible input properties in this dictionary are the properties
defined as writeable in the data model for Storage Volume resources
(where the property names contain underscores instead of hyphens),
with the following exceptions:"
- "* C(name): Cannot be specified because the name has already been
specified in the C(name) module parameter."
- "Properties omitted in this dictionary will remain unchanged when the
storage volume already exists, and will get the default value defined
in the data model for storage volumes in the HMC API book when the
storage volume is being created."
required: false
default: No properties.
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
required: false
default: null
faked_session:
description:
- "A C(zhmcclient_mock.FakedSession) object that has a mocked HMC set up.
If provided, it will be used instead of connecting to a real HMC. This
is used for testing purposes only."
required: false
default: Real HMC will be used.
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about a storage volume
zhmc_storage_volume:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
name: "{{ my_storage_volume_name }}"
state: facts
register: sv1
- name: Ensure the storage volume does not exist
zhmc_storage_volume:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
name: "{{ my_storage_volume_name }}"
state: absent
- name: Ensure the storage volume exists
zhmc_storage_volume:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
name: "{{ my_storage_volume_name }}"
state: present
properties:
description: "Example storage volume 1"
size: 1
register: sv1
"""
RETURN = """
storage_volume:
description:
- "For C(state=absent), an empty dictionary."
- "For C(state=present|facts), a dictionary with the resource properties of
the storage volume, indicating the state after changes from this module
(if any) have been applied.
The dictionary keys are the exact property names as described in the
data model for the resource, i.e. they contain hyphens (-), not
underscores (_). The dictionary values are the property values using the
Python representations described in the documentation of the zhmcclient
Python package.
The additional artificial properties are:"
- "* C(type): Type of the storage volume ('fc' or 'fcp'), as defined in its
storage group."
returned: success
type: dict
sample: |
C({
"name": "sv-1",
"description": "storage volume #1",
...
})
"""
# Python logger name for this module
LOGGER_NAME = 'zhmc_storage_volume'
LOGGER = logging.getLogger(LOGGER_NAME)
# Dictionary of properties of storage volume resources, in this format:
# name: (allowed, create, update, update_while_active, eq_func, type_cast)
# where:
# name: Name of the property according to the data model, with hyphens
# replaced by underscores (this is how it is or would be specified in
# the 'properties' module parameter).
# allowed: Indicates whether it is allowed in the 'properties' module
# parameter.
# create: Indicates whether it can be specified for creating a storage volume
# using the "Modify Storage Group Properties" operation (i.e.
# operation="create" in "storage-volume-request-info").
# update: Indicates whether it can be specified for modifying a storage
# volume using the "Modify Storage Group Properties" operation (i.e.
# operation="modify" in "storage-volume-request-info").
# update_while_active: Indicates whether it can be specified for modifying a
# storage volume using the "Modify Storage Group Properties" operation
# while the storage group is attached to any partition. None means
# "not applicable" (used for update=False).
# eq_func: Equality test function for two values of the property; None means
# to use Python equality.
# type_cast: Type cast function for an input value of the property; None
# means to use it directly. This can be used for example to convert
# integers provided as strings by Ansible back into integers (that is a
# current deficiency of Ansible).
ZHMC_STORAGE_VOLUME_PROPERTIES = {
# create-only properties: None
# update-only properties: None
# create+update properties:
'name': (False, True, True, True, None, None), # provided in module parm
'description': (True, True, True, True, None, to_unicode),
'size': (True, True, True, True, None, float),
'usage': (True, True, True, True, None, None),
'model': (True, True, True, True, None, None), # ECKD only
'cylinders': (True, True, True, True, None, int), # ECKD only
'device_number': (True, True, True, True, eq_hex, int), # ECKD only
# read-only properties:
'element_uri': (False, False, False, None, None, None),
'element_id': (False, False, False, None, None, None),
'parent': (False, False, False, None, None, None),
'class': (False, False, False, None, None, None),
'fulfillment_state': (False, False, False, None, None, None),
'active_size': (False, False, False, None, None, None),
'uuid': (False, False, False, None, None, None),
'active_model': (False, False, False, None, None, None),
'control_unit_uri': (False, False, False, None, None, None),
'eckd_type': (False, False, False, None, None, None),
'unit_address': (False, False, False, None, None, None),
# artificial properties:
# 'type': 'fc' or 'fcp', as defined in its storage group
}
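# Illustrative example (reading one entry above): 'size' is allowed as input,
# can be specified at creation and for update (also while the storage group is
# attached), is compared with Python equality (eq_func is None), and is cast
# with float(), so the decimal string "1" provided by Ansible becomes 1.0.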
def process_properties(cpc, storage_group, storage_volume, params):
"""
Process the properties specified in the 'properties' module parameter,
and return two dictionaries (create_props, update_props) that contain
the properties that can be created, and the properties that can be updated,
respectively. If the resource exists, the input property values are
compared with the existing resource property values and the returned set
of properties is the minimal set of properties that need to be changed.
- Underscores in the property names are translated into hyphens.
- The presence of read-only properties, invalid properties (i.e. not
      defined in the data model for storage volumes), and properties that are
not allowed because of restrictions or because they are auto-created from
an artificial property is surfaced by raising ParameterError.
- The properties resulting from handling artificial properties are
added to the returned dictionaries.
Parameters:
cpc (zhmcclient.Cpc): CPC associated to the storage group of the target
storage volume.
storage_group (zhmcclient.StorageGroup): Storage group of the target
storage volume.
storage_volume (zhmcclient.StorageVolume): Target storage volume if it
currently exists, or `None` if it does not currently exist.
params (dict): Module input parameters.
Returns:
tuple of (create_props, update_props), where:
* create_props: dict of properties for
zhmcclient.StorageVolumeManager.create()
* update_props: dict of properties for
zhmcclient.StorageVolume.update_properties()
Raises:
ParameterError: An issue with the module parameters.
"""
create_props = {}
update_props = {}
# handle 'name' property.
sv_name = to_unicode(params['name'])
if storage_volume is None:
# SV does not exist yet.
create_props['name'] = sv_name
else:
# SV does already exist.
# We looked up the storage volume by name, so we will never have to
# update the storage volume name.
pass
# handle the other properties
input_props = params.get('properties', None)
if input_props is None:
input_props = {}
for prop_name in input_props:
if prop_name not in ZHMC_STORAGE_VOLUME_PROPERTIES:
raise ParameterError(
"Property {!r} is not defined in the data model for "
"storage volumes.".format(prop_name))
allowed, create, update, update_while_active, eq_func, type_cast = \
ZHMC_STORAGE_VOLUME_PROPERTIES[prop_name]
if not allowed:
raise ParameterError(
"Property {!r} is not allowed in the 'properties' module "
"parameter.".format(prop_name))
# Process a normal (= non-artificial) property
_create_props, _update_props, _stop = process_normal_property(
prop_name, ZHMC_STORAGE_VOLUME_PROPERTIES, input_props,
storage_volume)
create_props.update(_create_props)
update_props.update(_update_props)
assert _stop is False
return create_props, update_props
def add_artificial_properties(storage_volume):
"""
Add artificial properties to the storage_volume object.
Upon return, the properties of the storage_volume object have been
extended by these properties:
* 'type': Type of storage group of the volume: 'fc' (for ECKD) or 'fcp'.
"""
storage_group = storage_volume.manager.parent
# Type property
type_prop = storage_group.get_property('type')
storage_volume.properties['type'] = type_prop
def ensure_present(params, check_mode):
"""
Ensure that the storage volume is defined and has the specified properties.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
storage_group_name = params['storage_group_name']
storage_volume_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
console = client.consoles.console
cpc = client.cpcs.find(name=cpc_name)
storage_group = console.storage_groups.find(name=storage_group_name)
# The default exception handling is sufficient for the above.
sg_cpc = storage_group.cpc
if sg_cpc.uri != cpc.uri:
raise ParameterError(
"Storage group {!r} is not associated with the specified "
"CPC {!r}, but with CPC {!r}.".
format(storage_group_name, cpc.name, sg_cpc.name))
try:
storage_volume = storage_group.storage_volumes.find(
name=storage_volume_name)
except zhmcclient.NotFound:
storage_volume = None
except zhmcclient.NoUniqueMatch:
# The name of storage volumes within their storage group is not
# enforced to be unique.
raise
if storage_volume is None:
# It does not exist. Create it and update it if there are
# update-only properties.
if not check_mode:
create_props, update_props = \
process_properties(cpc, storage_group, storage_volume,
params)
storage_volume = storage_group.storage_volumes.create(
create_props)
update2_props = {}
for name in update_props:
if name not in create_props:
update2_props[name] = update_props[name]
if update2_props:
storage_volume.update_properties(update2_props)
# We refresh the properties after the update, in case an
# input property value gets changed.
storage_volume.pull_full_properties()
else:
# TODO: Show props in module result also in check mode.
pass
changed = True
else:
# It exists. Update its properties.
storage_volume.pull_full_properties()
create_props, update_props = \
process_properties(cpc, storage_group, storage_volume, params)
assert not create_props, \
"Unexpected create_props: {!r}".format(create_props)
if update_props:
if not check_mode:
storage_volume.update_properties(update_props)
# We refresh the properties after the update, in case an
# input property value gets changed.
storage_volume.pull_full_properties()
else:
# TODO: Show updated props in mod.result also in chk.mode
storage_volume.pull_full_properties()
changed = True
if not check_mode:
assert storage_volume
if storage_volume:
add_artificial_properties(storage_volume)
result = storage_volume.properties
return changed, result
finally:
session.logoff()
def ensure_absent(params, check_mode):
"""
Ensure that the storage volume does not exist.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
storage_group_name = params['storage_group_name']
storage_volume_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
console = client.consoles.console
cpc = client.cpcs.find(name=cpc_name)
storage_group = console.storage_groups.find(name=storage_group_name)
# The default exception handling is sufficient for the above.
sg_cpc = storage_group.cpc
if sg_cpc.uri != cpc.uri:
raise ParameterError(
"Storage group {!r} is not associated with the specified "
"CPC {!r}, but with CPC {!r}.".
format(storage_group_name, cpc.name, sg_cpc.name))
try:
storage_volume = storage_group.storage_volumes.find(
name=storage_volume_name)
except zhmcclient.NotFound:
return changed, result
except zhmcclient.NoUniqueMatch:
# The name of storage volumes within their storage group is not
# enforced to be unique.
raise
if not check_mode:
storage_volume.delete()
changed = True
return changed, result
finally:
session.logoff()
def facts(params, check_mode):
"""
Return facts about a storage volume.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
storage_group_name = params['storage_group_name']
storage_volume_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
console = client.consoles.console
cpc = client.cpcs.find(name=cpc_name)
storage_group = console.storage_groups.find(name=storage_group_name)
# The default exception handling is sufficient for the above.
sg_cpc = storage_group.cpc
if sg_cpc.uri != cpc.uri:
raise ParameterError(
"Storage group {!r} is not associated with the specified "
"CPC {!r}, but with CPC {!r}.".
format(storage_group_name, cpc.name, sg_cpc.name))
try:
storage_volume = storage_group.storage_volumes.find(
name=storage_volume_name)
except zhmcclient.NoUniqueMatch:
# The name of storage volumes within their storage group is not
# enforced to be unique.
raise
storage_volume.pull_full_properties()
add_artificial_properties(storage_volume)
result = storage_volume.properties
return changed, result
finally:
session.logoff()
def perform_task(params, check_mode):
"""
Perform the task for this module, dependent on the 'state' module
parameter.
If check_mode is True, check whether changes would occur, but don't
actually perform any changes.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
actions = {
"absent": ensure_absent,
"present": ensure_present,
"facts": facts,
}
return actions[params['state']](params, check_mode)
def main():
# The following definition of module input parameters must match the
# description of the options in the DOCUMENTATION string.
argument_spec = dict(
hmc_host=dict(required=True, type='str'),
hmc_auth=dict(required=True, type='dict', no_log=True),
cpc_name=dict(required=True, type='str'),
storage_group_name=dict(required=True, type='str'),
name=dict(required=True, type='str'),
state=dict(required=True, type='str',
choices=['absent', 'present', 'facts']),
properties=dict(required=False, type='dict', default={}),
log_file=dict(required=False, type='str', default=None),
faked_session=dict(required=False, type='object'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
log_file = module.params['log_file']
log_init(LOGGER_NAME, log_file)
_params = dict(module.params)
del _params['hmc_auth']
LOGGER.debug("Module entry: params: {!r}".format(_params))
try:
changed, result = perform_task(module.params, module.check_mode)
except (Error, zhmcclient.Error) as exc:
# These exceptions are considered errors in the environment or in user
# input. They have a proper message that stands on its own, so we
# simply pass that message on and will not need a traceback.
msg = "{}: {}".format(exc.__class__.__name__, exc)
LOGGER.debug(
"Module exit (failure): msg: {!r}".
format(msg))
module.fail_json(msg=msg)
# Other exceptions are considered module errors and are handled by Ansible
# by showing the traceback.
LOGGER.debug(
"Module exit (success): changed: {!r}, cpc: {!r}".
format(changed, result))
module.exit_json(changed=changed, storage_volume=result)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/zhmc_storage_volume.py | zhmc_storage_volume.py |
from __future__ import absolute_import, print_function
import logging
from ansible.module_utils.basic import AnsibleModule
import requests.packages.urllib3
import zhmcclient
from zhmc_ansible_modules.utils import log_init, Error, ParameterError, \
wait_for_transition_completion, eq_hex, eq_mac, get_hmc_auth, \
get_session, to_unicode, process_normal_property
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_nic
version_added: "0.0"
short_description: Manages NICs in existing partitions
description:
- Creates, updates, and deletes NICs in existing partitions of a CPC.
- The targeted CPC must be in the Dynamic Partition Manager (DPM) operational
mode.
notes:
- See also Ansible module zhmc_partition.
author:
- Andreas Maier (@andy-maier, [email protected])
- Andreas Scheuring (@scheuran, [email protected])
- Juergen Leopold (@leopoldjuergen, [email protected])
requirements:
- Network access to HMC
- zhmcclient >=0.14.0
- ansible >=2.2.0.0
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
required: true
hmc_auth:
description:
- The authentication credentials for the HMC.
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
required: true
password:
description:
- The password for authenticating with the HMC.
required: true
cpc_name:
description:
- The name of the CPC with the partition containing the NIC.
required: true
partition_name:
description:
- The name of the partition containing the NIC.
required: true
name:
description:
- The name of the target NIC that is managed. If the NIC needs to be
created, this value becomes its name.
required: true
state:
description:
- "The desired state for the target NIC:"
- "C(absent): Ensures that the NIC does not exist in the specified
partition."
- "C(present): Ensures that the NIC exists in the specified partition
and has the specified properties."
required: true
choices: ["absent", "present"]
properties:
description:
- "Dictionary with input properties for the NIC, for C(state=present).
Key is the property name with underscores instead of hyphens, and
value is the property value in YAML syntax. Integer properties may
also be provided as decimal strings. Will be ignored for
C(state=absent)."
- "The possible input properties in this dictionary are the properties
defined as writeable in the data model for NIC resources (where the
property names contain underscores instead of hyphens), with the
following exceptions:"
- "* C(name): Cannot be specified because the name has already been
specified in the C(name) module parameter."
- "* C(network_adapter_port_uri) and C(virtual_switch_uri): Cannot be
specified because this information is specified using the artificial
properties C(adapter_name) and C(adapter_port)."
- "* C(adapter_name): The name of the adapter that has the port backing
the target NIC. Used for all adapter families (ROCE, OSA,
Hipersockets)."
- "* C(adapter_port): The port index of the adapter port backing the
target NIC. Used for all adapter families (ROCE, OSA, Hipersockets)."
- "Properties omitted in this dictionary will remain unchanged when the
NIC already exists, and will get the default value defined in the
data model for NICs when the NIC is being created."
required: false
default: No input properties
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
required: false
default: null
faked_session:
description:
- "A C(zhmcclient_mock.FakedSession) object that has a mocked HMC set up.
If provided, it will be used instead of connecting to a real HMC. This
is used for testing purposes only."
required: false
default: Real HMC will be used.
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Ensure NIC exists in the partition
  zhmc_nic:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_nic_name }}"
state: present
properties:
adapter_name: "OSD 0128 A13B-13"
adapter_port: 0
description: "The port to our data network"
device_number: "023F"
register: nic1
- name: Ensure NIC does not exist in the partition
  zhmc_nic:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_nic_name }}"
state: absent
"""
RETURN = """
nic:
description:
- "For C(state=absent), an empty dictionary."
- "For C(state=present), a dictionary with the resource properties of the
NIC (after changes, if any). The dictionary keys are the exact property
names as described in the data model for the resource, i.e. they contain
hyphens (-), not underscores (_). The dictionary values are the property
values using the Python representations described in the documentation
of the zhmcclient Python package."
returned: success
type: dict
sample: |
C({
"name": "nic-1",
"description": "NIC #1",
"virtual-switch-uri': "/api/vswitches/...",
...
})
"""
# Python logger name for this module
LOGGER_NAME = 'zhmc_nic'
LOGGER = logging.getLogger(LOGGER_NAME)
# Dictionary of properties of NIC resources, in this format:
# name: (allowed, create, update, update_while_active, eq_func, type_cast)
# where:
# name: Name of the property according to the data model, with hyphens
# replaced by underscores (this is how it is or would be specified in
# the 'properties' module parameter).
# allowed: Indicates whether it is allowed in the 'properties' module
# parameter.
# create: Indicates whether it can be specified for the "Create NIC"
# operation.
# update: Indicates whether it can be specified for the "Update NIC
# Properties" operation (at all).
# update_while_active: Indicates whether it can be specified for the "Update
# NIC Properties" operation while the partition of the NIC is active. None
# means "not applicable" (i.e. update=False).
# eq_func: Equality test function for two values of the property; None means
# to use Python equality.
# type_cast: Type cast function for an input value of the property; None
# means to use it directly. This can be used for example to convert
# integers provided as strings by Ansible back into integers (that is a
# current deficiency of Ansible).
# Note: This should always represent the latest version of the HMC/SE.
# Attempts to set a property that does not exist or that is not writeable in
# the target HMC will be handled by the HMC rejecting the operation.
ZHMC_NIC_PROPERTIES = {
# create+update properties:
'name': (
False, True, True, True, None, None), # provided in 'name' module parm
'description': (True, True, True, True, None, to_unicode),
'device_number': (True, True, True, True, eq_hex, None),
'network_adapter_port_uri': (
False, True, True, True, None, None), # via adapter_name/_port
'virtual_switch_uri': (
False, True, True, True, None, None), # via adapter_name/_port
'adapter_name': (
True, True, True, True, None,
None), # artificial property, type_cast ignored
'adapter_port': (
True, True, True, True, None,
None), # artificial property, type_cast ignored
# The ssc-*, vlan-id and mac-address properties were introduced in
# API version 2.2 (an update of SE 2.13.1).
# The mac-address property was changed to be writeable in API version 2.20
# (SE 2.14.0).
'ssc_management_nic': (True, True, True, True, None, None),
'ssc_ip_address_type': (True, True, True, True, None, None),
'ssc_ip_address': (True, True, True, True, None, None),
'ssc_mask_prefix': (True, True, True, True, None, None),
'vlan_id': (True, True, True, True, None, int),
'mac_address': (True, True, True, None, eq_mac, None),
# The vlan-type property was introduced in API version 2.20 (SE 2.14.0).
'vlan_type': (True, True, True, True, None, None),
# The function-* properties were introduced in API version 3.4
# (SE 2.15 GA2).
'function_number': (True, True, True, True, None, int),
'function_range': (True, True, True, True, None, int),
# read-only properties:
    'element_uri': (False, False, False, None, None, None),
    'element_id': (False, False, False, None, None, None),
'parent': (False, False, False, None, None, None),
'class': (False, False, False, None, None, None),
'type': (False, False, False, None, None, None),
}
def process_properties(partition, nic, params):
"""
    Process the properties specified in the 'properties' module parameter,
    and return a tuple (create_props, update_props, stop) whose first two
    items are dictionaries that contain the properties that can be created,
    and the properties that can be updated, respectively. If the resource
    exists, the input property values are compared with the existing resource
    property values and the returned set of properties is the minimal set of
    properties that need to be changed.
- Underscores in the property names are translated into hyphens.
- The presence of read-only properties, invalid properties (i.e. not
      defined in the data model for NICs), and properties that are not
allowed because of restrictions or because they are auto-created from
an artificial property is surfaced by raising ParameterError.
- The properties resulting from handling artificial properties are
added to the returned dictionaries.
Parameters:
partition (zhmcclient.Partition): Partition containing the NIC. Must
exist.
nic (zhmcclient.Nic): NIC to be updated with the full set of current
properties, or `None` if it did not previously exist.
params (dict): Module input parameters.
Returns:
tuple of (create_props, update_props, stop), where:
* create_props: dict of properties for
zhmcclient.NicManager.create()
* update_props: dict of properties for
zhmcclient.Nic.update_properties()
* stop (bool): Indicates whether some update properties require the
      partition containing the NIC to be stopped when doing the update.
Raises:
ParameterError: An issue with the module parameters.
"""
create_props = {}
update_props = {}
stop = False
# handle 'name' property
nic_name = to_unicode(params['name'])
create_props['name'] = nic_name
# We looked up the NIC by name, so we will never have to update its name
# Names of the artificial properties
adapter_name_art_name = 'adapter_name'
adapter_port_art_name = 'adapter_port'
# handle the other properties
input_props = params.get('properties', {})
if input_props is None:
input_props = {}
for prop_name in input_props:
if prop_name not in ZHMC_NIC_PROPERTIES:
raise ParameterError(
"Property {!r} is not defined in the data model for "
"NICs.".format(prop_name))
allowed, create, update, update_while_active, eq_func, type_cast = \
ZHMC_NIC_PROPERTIES[prop_name]
if not allowed:
raise ParameterError(
"Property {!r} is not allowed in the 'properties' module "
"parameter.".format(prop_name))
if prop_name in (adapter_name_art_name, adapter_port_art_name):
# Artificial properties will be processed together after this loop
continue
# Process a normal (= non-artificial) property
_create_props, _update_props, _stop = process_normal_property(
prop_name, ZHMC_NIC_PROPERTIES, input_props, nic)
create_props.update(_create_props)
update_props.update(_update_props)
if _stop:
stop = True
# Process artificial properties
if (adapter_name_art_name in input_props) != \
(adapter_port_art_name in input_props):
raise ParameterError(
"Artificial properties {!r} and {!r} must either both be "
"specified or both be omitted.".
format(adapter_name_art_name, adapter_port_art_name))
if adapter_name_art_name in input_props and \
adapter_port_art_name in input_props:
adapter_name = to_unicode(input_props[adapter_name_art_name])
adapter_port_index = int(input_props[adapter_port_art_name])
try:
adapter = partition.manager.cpc.adapters.find(
name=adapter_name)
except zhmcclient.NotFound:
raise ParameterError(
"Artificial property {!r} does not specify the name of an "
"existing adapter: {!r}".
format(adapter_name_art_name, adapter_name))
try:
port = adapter.ports.find(index=adapter_port_index)
except zhmcclient.NotFound:
raise ParameterError(
"Artificial property {!r} does not specify the index of an "
"existing port on adapter {!r}: {!r}".
format(adapter_port_art_name, adapter_name,
adapter_port_index))
# The rest of it depends on the network adapter family:
adapter_family = adapter.get_property('adapter-family')
if adapter_family in ('roce', 'cna'):
# Here we perform the same logic as in the property loop, just now
# simplified by the knowledge about the property flags (create,
# update, etc.).
hmc_prop_name = 'network-adapter-port-uri'
input_prop_value = port.uri
if nic:
if nic.properties.get(hmc_prop_name) != input_prop_value:
update_props[hmc_prop_name] = input_prop_value
else:
update_props[hmc_prop_name] = input_prop_value
create_props[hmc_prop_name] = input_prop_value
elif adapter_family in ('osa', 'hipersockets'):
vswitches = partition.manager.cpc.virtual_switches.findall(
**{'backing-adapter-uri': adapter.uri})
# Adapters of this family always have a vswitch (one for each
# port), so we assert that we can find one or more:
assert vswitches
found_vswitch = None
for vswitch in vswitches:
if vswitch.get_property('port') == adapter_port_index:
found_vswitch = vswitch
break
# Because we already checked for the existence of the specified
# port index, we can now assert that we found the vswitch for that
# port:
assert found_vswitch
# Here we perform the same logic as in the property loop, just now
# simplified by the knowledge about the property flags (create,
# update, etc.).
hmc_prop_name = 'virtual-switch-uri'
input_prop_value = found_vswitch.uri
if nic:
if nic.properties.get(hmc_prop_name) != input_prop_value:
update_props[hmc_prop_name] = input_prop_value
else:
update_props[hmc_prop_name] = input_prop_value
create_props[hmc_prop_name] = input_prop_value
else:
raise ParameterError(
"Artificial property {!r} specifies the name of a non-network "
"adapter of family {!r}: {!r}".
format(adapter_name_art_name, adapter_family, adapter_name))
return create_props, update_props, stop
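# Illustrative sketch (comment only, not executed; the adapter and port
# values are hypothetical): for input properties such as
#   {'adapter_name': 'OSA-1', 'adapter_port': 0}
# process_properties() resolves the named adapter and port. For an OSA or
# HiperSockets adapter it sets the HMC property 'virtual-switch-uri' to
# the URI of the virtual switch backing that port; for a RoCE or CNA
# adapter it sets 'network-adapter-port-uri' to the port URI instead.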
def ensure_present(params, check_mode):
"""
Ensure that the NIC exists and has the specified properties.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['partition_name']
nic_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
partition = cpc.partitions.find(name=partition_name)
except zhmcclient.NotFound:
if check_mode:
# Once the partition is created, the NIC will also need to be
# created. Therefore, we set changed.
changed = True
return changed, result
raise
try:
nic = partition.nics.find(name=nic_name)
nic.pull_full_properties()
except zhmcclient.NotFound:
nic = None
if not nic:
# It does not exist. Create it and update it if there are
# update-only properties.
if not check_mode:
create_props, update_props, stop = process_properties(
partition, nic, params)
nic = partition.nics.create(create_props)
update2_props = {}
for name in update_props:
if name not in create_props:
update2_props[name] = update_props[name]
if update2_props:
nic.update_properties(update2_props)
# We refresh the properties after the update, in case an
# input property value gets changed (for example, the
# partition does that with memory properties).
nic.pull_full_properties()
else:
# TODO: Show props in module result also in check mode.
pass
changed = True
else:
# It exists. Stop the partition if needed due to the NIC property
# update requirements, or wait for an updateable partition status,
# and update the NIC properties.
create_props, update_props, stop = process_properties(
partition, nic, params)
if update_props:
if not check_mode:
# NIC properties can all be updated while the partition is
# active, therefore:
assert not stop
wait_for_transition_completion(partition)
nic.update_properties(update_props)
# We refresh the properties after the update, in case an
# input property value gets changed (for example, the
# partition does that with memory properties).
nic.pull_full_properties()
else:
# TODO: Show updated props in module result also in check mode.
pass
changed = True
if nic:
result = nic.properties
return changed, result
finally:
session.logoff()
def ensure_absent(params, check_mode):
"""
Ensure that the NIC does not exist.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['partition_name']
nic_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
partition = cpc.partitions.find(name=partition_name)
# The default exception handling is sufficient for the above.
try:
nic = partition.nics.find(name=nic_name)
except zhmcclient.NotFound:
return changed, result
if not check_mode:
nic.delete()
changed = True
return changed, result
finally:
session.logoff()
def perform_task(params, check_mode):
"""
Perform the task for this module, dependent on the 'state' module
parameter.
If check_mode is True, check whether changes would occur, but don't
actually perform any changes.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
actions = {
"absent": ensure_absent,
"present": ensure_present,
}
return actions[params['state']](params, check_mode)
def main():
# The following definition of module input parameters must match the
# description of the options in the DOCUMENTATION string.
argument_spec = dict(
hmc_host=dict(required=True, type='str'),
hmc_auth=dict(required=True, type='dict', no_log=True),
cpc_name=dict(required=True, type='str'),
partition_name=dict(required=True, type='str'),
name=dict(required=True, type='str'),
state=dict(required=True, type='str',
choices=['absent', 'present']),
properties=dict(required=False, type='dict', default={}),
log_file=dict(required=False, type='str', default=None),
faked_session=dict(required=False, type='object'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
log_file = module.params['log_file']
log_init(LOGGER_NAME, log_file)
_params = dict(module.params)
del _params['hmc_auth']
LOGGER.debug("Module entry: params: {!r}".format(_params))
try:
changed, result = perform_task(module.params, module.check_mode)
except (Error, zhmcclient.Error) as exc:
# These exceptions are considered errors in the environment or in user
# input. They have a proper message that stands on its own, so we
# simply pass that message on and will not need a traceback.
msg = "{}: {}".format(exc.__class__.__name__, exc)
LOGGER.debug(
"Module exit (failure): msg: {!r}".
format(msg))
module.fail_json(msg=msg)
# Other exceptions are considered module errors and are handled by Ansible
# by showing the traceback.
LOGGER.debug(
"Module exit (success): changed: {!r}, cpc: {!r}".
format(changed, result))
module.exit_json(changed=changed, nic=result)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/zhmc_nic.py | zhmc_nic.py |
from __future__ import absolute_import, print_function
import logging
from ansible.module_utils.basic import AnsibleModule
import requests.packages.urllib3
import zhmcclient
from zhmc_ansible_modules.utils import log_init, Error, ParameterError, \
wait_for_transition_completion, eq_hex, get_hmc_auth, get_session, \
to_unicode, process_normal_property
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_virtual_function
version_added: "0.0"
short_description: Manages virtual functions in existing partitions
description:
- Creates, updates, and deletes virtual functions in existing partitions of a
CPC.
- The targeted CPC must be in the Dynamic Partition Manager (DPM) operational
mode.
notes:
- See also Ansible module zhmc_partition.
author:
- Andreas Maier (@andy-maier, [email protected])
- Andreas Scheuring (@scheuran, [email protected])
- Juergen Leopold (@leopoldjuergen, [email protected])
requirements:
- Network access to HMC
- zhmcclient >=0.14.0
- ansible >=2.2.0.0
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
required: true
hmc_auth:
description:
- The authentication credentials for the HMC.
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
required: true
password:
description:
- The password for authenticating with the HMC.
required: true
cpc_name:
description:
- The name of the CPC with the partition containing the virtual function.
required: true
partition_name:
description:
- The name of the partition containing the virtual function.
required: true
name:
description:
- The name of the target virtual function that is managed. If the virtual
function needs to be created, this value becomes its name.
required: true
state:
description:
- "The desired state for the target virtual function:"
- "C(absent): Ensures that the virtual function does not exist in the
specified partition."
- "C(present): Ensures that the virtual function exists in the specified
partition and has the specified properties."
required: true
choices: ["absent", "present"]
properties:
description:
- "Dictionary with input properties for the virtual function, for
C(state=present). Key is the property name with underscores instead of
hyphens, and value is the property value in YAML syntax. Integer
properties may also be provided as decimal strings. Will be ignored
for C(state=absent)."
- "The possible input properties in this dictionary are the properties
defined as writeable in the data model for Virtual Function resources
(where the property names contain underscores instead of hyphens),
with the following exceptions:"
- "* C(name): Cannot be specified because the name has already been
specified in the C(name) module parameter."
- "* C(adapter_uri): Cannot be specified because this information is
specified using the artificial property C(adapter_name)."
- "* C(adapter_name): The name of the adapter that backs the target
virtual function."
- "Properties omitted in this dictionary will remain unchanged when the
virtual function already exists, and will get the default value
defined in the data model for virtual functions when the virtual
function is being created."
required: false
default: No input properties
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
required: false
default: null
faked_session:
description:
- "A C(zhmcclient_mock.FakedSession) object that has a mocked HMC set up.
If provided, it will be used instead of connecting to a real HMC. This
is used for testing purposes only."
required: false
default: Real HMC will be used.
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Ensure virtual function exists in the partition
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_vfunction_name }}"
state: present
properties:
adapter_name: "ABC-123"
description: "The accelerator adapter"
device_number: "033F"
register: vfunction1
- name: Ensure virtual function does not exist in the partition
zhmc_partition:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
name: "{{ my_vfunction_name }}"
state: absent
"""
RETURN = """
virtual_function:
description:
- "For C(state=absent), an empty dictionary."
- "For C(state=present), a dictionary with the resource properties of the
virtual function (after changes, if any). The dictionary keys are the
exact property names as described in the data model for the resource,
i.e. they contain hyphens (-), not underscores (_). The dictionary
values are the property values using the Python representations
described in the documentation of the zhmcclient Python package."
returned: success
type: dict
sample: |
C({
"name": "vfunction-1",
"description": "virtual function #1",
"adapter-uri': "/api/adapters/...",
...
})
"""
# Python logger name for this module
LOGGER_NAME = 'zhmc_virtual_function'
LOGGER = logging.getLogger(LOGGER_NAME)
# Dictionary of properties of virtual function resources, in this format:
# name: (allowed, create, update, update_while_active, eq_func, type_cast)
# where:
# name: Name of the property according to the data model, with hyphens
# replaced by underscores (this is how it is or would be specified in
# the 'properties' module parameter).
# allowed: Indicates whether it is allowed in the 'properties' module
# parameter.
# create: Indicates whether it can be specified for the "Create Virtual
# Function" operation.
# update: Indicates whether it can be specified for the "Update Virtual
# Function Properties" operation (at all).
# update_while_active: Indicates whether it can be specified for the "Update
# Virtual Function Properties" operation while the partition of the
# virtual function is active. None means "not applicable" (i.e.
# update=False).
# eq_func: Equality test function for two values of the property; None means
# to use Python equality.
# type_cast: Type cast function for an input value of the property; None
# means to use it directly. This can be used for example to convert
# integers provided as strings by Ansible back into integers (that is a
# current deficiency of Ansible).
ZHMC_VFUNCTION_PROPERTIES = {
# create+update properties:
'name': (
False, True, True, True, None, None), # provided in 'name' module parm
'description': (True, True, True, True, None, to_unicode),
'device_number': (True, True, True, True, eq_hex, None),
'adapter_uri': (
False, True, True, True, None, None), # via adapter_name
'adapter_name': (
True, True, True, True, None,
None), # artificial property, type_cast ignored
# read-only properties:
'element-uri': (False, False, False, None, None, None),
'element-id': (False, False, False, None, None, None),
'parent': (False, False, False, None, None, None),
'class': (False, False, False, None, None, None),
}
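# Reading an entry of this dictionary, using 'device_number' as an
# example:
#   'device_number': (True, True, True, True, eq_hex, None)
# means: allowed in the 'properties' module parameter, can be specified
# on create and on update (even while the partition is active), compared
# using hex string equality (eq_hex), and used without a type cast.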
def process_properties(partition, vfunction, params):
"""
Process the properties specified in the 'properties' module parameter,
and return two dictionaries (create_props, update_props) that contain
the properties that can be created, and the properties that can be updated,
respectively. If the resource exists, the input property values are
compared with the existing resource property values and the returned set
of properties is the minimal set of properties that need to be changed.
- Underscores in the property names are translated into hyphens.
- The presence of read-only properties, invalid properties (i.e. not
defined in the data model for partitions), and properties that are not
allowed because of restrictions or because they are auto-created from
an artificial property is surfaced by raising ParameterError.
- The properties resulting from handling artificial properties are
added to the returned dictionaries.
Parameters:
partition (zhmcclient.Partition): Partition containing the virtual
function. Must exist.
vfunction (zhmcclient.VirtualFunction): Virtual function to be updated
with the full set of current properties, or `None` if it did not
previously exist.
params (dict): Module input parameters.
Returns:
tuple of (create_props, update_props, stop), where:
* create_props: dict of properties for
zhmcclient.VirtualFunctionManager.create()
* update_props: dict of properties for
zhmcclient.VirtualFunction.update_properties()
* stop (bool): Indicates whether some update properties require the
partition containing the virtual function to be stopped when doing the
update.
Raises:
ParameterError: An issue with the module parameters.
"""
create_props = {}
update_props = {}
stop = False
# handle 'name' property
vfunction_name = to_unicode(params['name'])
create_props['name'] = vfunction_name
# We looked up the virtual function by name, so we will never have to
# update its name.
# Names of the artificial properties
adapter_name_art_name = 'adapter_name'
# handle the other properties
input_props = params.get('properties', {})
if input_props is None:
input_props = {}
for prop_name in input_props:
if prop_name not in ZHMC_VFUNCTION_PROPERTIES:
raise ParameterError(
"Property {!r} is not defined in the data model for "
"virtual functions.".format(prop_name))
allowed, create, update, update_while_active, eq_func, type_cast = \
ZHMC_VFUNCTION_PROPERTIES[prop_name]
if not allowed:
raise ParameterError(
"Property {!r} is not allowed in the 'properties' module "
"parameter.".format(prop_name))
if prop_name == adapter_name_art_name:
# Artificial properties will be processed together after this loop
continue
# Process a normal (= non-artificial) property
_create_props, _update_props, _stop = process_normal_property(
prop_name, ZHMC_VFUNCTION_PROPERTIES, input_props, vfunction)
create_props.update(_create_props)
update_props.update(_update_props)
if _stop:
stop = True
# Process artificial properties
if adapter_name_art_name in input_props:
adapter_name = to_unicode(input_props[adapter_name_art_name])
try:
adapter = partition.manager.cpc.adapters.find(
name=adapter_name)
except zhmcclient.NotFound:
raise ParameterError(
"Artificial property {!r} does not specify the name of an "
"existing adapter: {!r}".
format(adapter_name_art_name, adapter_name))
# Here we perform the same logic as in the property loop, just now
# simplified by the knowledge about the property flags (create, update,
# etc.).
hmc_prop_name = 'adapter-uri'
input_prop_value = adapter.uri
if vfunction:
if vfunction.properties.get(hmc_prop_name) != input_prop_value:
update_props[hmc_prop_name] = input_prop_value
else:
update_props[hmc_prop_name] = input_prop_value
create_props[hmc_prop_name] = input_prop_value
return create_props, update_props, stop
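# Illustrative sketch (comment only, not executed; the values are
# hypothetical): for a virtual function that does not exist yet, input
# properties such as
#   {'adapter_name': 'ABC-123', 'device_number': '033F'}
# cause process_properties() to resolve adapter 'ABC-123' on the CPC and
# to return create_props containing 'name', 'device-number' and
# 'adapter-uri', with stop=False.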
def ensure_present(params, check_mode):
"""
Ensure that the virtual function exists and has the specified properties.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['partition_name']
vfunction_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
# The default exception handling is sufficient for the above.
try:
partition = cpc.partitions.find(name=partition_name)
except zhmcclient.NotFound:
if check_mode:
# Once the partition is created, the virtual function will
# also need to be created. Therefore, we set changed.
changed = True
return changed, result
raise
try:
vfunction = partition.virtual_functions.find(name=vfunction_name)
vfunction.pull_full_properties()
except zhmcclient.NotFound:
vfunction = None
if not vfunction:
# It does not exist. Create it and update it if there are
# update-only properties.
if not check_mode:
create_props, update_props, stop = process_properties(
partition, vfunction, params)
vfunction = partition.virtual_functions.create(create_props)
update2_props = {}
for name in update_props:
if name not in create_props:
update2_props[name] = update_props[name]
if update2_props:
vfunction.update_properties(update2_props)
# We refresh the properties after the update, in case an
# input property value gets changed (for example, the
# partition does that with memory properties).
vfunction.pull_full_properties()
else:
# TODO: Show props in module result also in check mode.
pass
changed = True
else:
# It exists. Stop the partition if needed due to the virtual
# function property update requirements, or wait for an updateable
# partition status, and update the virtual function properties.
create_props, update_props, stop = process_properties(
partition, vfunction, params)
if update_props:
if not check_mode:
# Virtual function properties can all be updated while the
# partition is active, therefore:
assert not stop
wait_for_transition_completion(partition)
vfunction.update_properties(update_props)
# We refresh the properties after the update, in case an
# input property value gets changed (for example, the
# partition does that with memory properties).
vfunction.pull_full_properties()
else:
# TODO: Show updated props in module result also in check mode.
pass
changed = True
if vfunction:
result = vfunction.properties
return changed, result
finally:
session.logoff()
def ensure_absent(params, check_mode):
"""
Ensure that the virtual function does not exist.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['partition_name']
vfunction_name = params['name']
faked_session = params.get('faked_session', None)
changed = False
result = {}
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
partition = cpc.partitions.find(name=partition_name)
# The default exception handling is sufficient for the above.
try:
vfunction = partition.virtual_functions.find(name=vfunction_name)
except zhmcclient.NotFound:
return changed, result
if not check_mode:
vfunction.delete()
changed = True
return changed, result
finally:
session.logoff()
def perform_task(params, check_mode):
"""
Perform the task for this module, dependent on the 'state' module
parameter.
If check_mode is True, check whether changes would occur, but don't
actually perform any changes.
Raises:
ParameterError: An issue with the module parameters.
StatusError: An issue with the partition status.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
actions = {
"absent": ensure_absent,
"present": ensure_present,
}
return actions[params['state']](params, check_mode)
def main():
# The following definition of module input parameters must match the
# description of the options in the DOCUMENTATION string.
argument_spec = dict(
hmc_host=dict(required=True, type='str'),
hmc_auth=dict(required=True, type='dict', no_log=True),
cpc_name=dict(required=True, type='str'),
partition_name=dict(required=True, type='str'),
name=dict(required=True, type='str'),
state=dict(required=True, type='str',
choices=['absent', 'present']),
properties=dict(required=False, type='dict', default={}),
log_file=dict(required=False, type='str', default=None),
faked_session=dict(required=False, type='object'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
log_file = module.params['log_file']
log_init(LOGGER_NAME, log_file)
_params = dict(module.params)
del _params['hmc_auth']
LOGGER.debug("Module entry: params: {!r}".format(_params))
try:
changed, result = perform_task(module.params, module.check_mode)
except (Error, zhmcclient.Error) as exc:
# These exceptions are considered errors in the environment or in user
# input. They have a proper message that stands on its own, so we
# simply pass that message on and will not need a traceback.
msg = "{}: {}".format(exc.__class__.__name__, exc)
LOGGER.debug(
"Module exit (failure): msg: {!r}".
format(msg))
module.fail_json(msg=msg)
# Other exceptions are considered module errors and are handled by Ansible
# by showing the traceback.
LOGGER.debug(
"Module exit (success): changed: {!r}, cpc: {!r}".
format(changed, result))
module.exit_json(changed=changed, virtual_function=result)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/zhmc_virtual_function.py | zhmc_virtual_function.py |
from __future__ import absolute_import, print_function
import logging
from ansible.module_utils.basic import AnsibleModule
import requests.packages.urllib3
import zhmcclient
from zhmc_ansible_modules.utils import log_init, Error, ParameterError, \
get_hmc_auth, get_session
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_crypto_attachment
version_added: "0.6"
short_description: Manages the attachment of crypto adapters and domains to
partitions
description:
- Gathers facts about the attachment of crypto adapters and domains to a
partition.
- Attaches a range of crypto domains and a number of crypto adapters to a
partition.
- Detaches all crypto domains and all crypto adapters from a partition.
notes:
- The CPC of the target partition must be in the
Dynamic Partition Manager (DPM) operational mode.
author:
- Andreas Maier (@andy-maier, [email protected])
- Andreas Scheuring (@scheuran, [email protected])
requirements:
- Network access to HMC
- zhmcclient >=0.20.0
- ansible >=2.2.0.0
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
required: true
hmc_auth:
description:
- The authentication credentials for the HMC.
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
required: true
password:
description:
- The password for authenticating with the HMC.
required: true
cpc_name:
description:
- The name of the CPC that has the partition and the crypto adapters.
required: true
partition_name:
description:
- The name of the partition to which the crypto domains and crypto
adapters are attached.
required: true
state:
description:
- "The desired state for the attachment:"
- "* C(attached): Ensures that the specified number of crypto adapters
of the specified crypto type, and the specified range of domain index
numbers in the specified access mode are attached to the partition."
- "* C(detached): Ensures that no crypto adapter and no crypto domains
are attached to the partition."
- "* C(facts): Does not change anything on the attachment and returns
the crypto configuration of the partition."
required: true
choices: ['attached', 'detached', 'facts']
adapter_count:
description:
- "Only for C(state=attach): The number of crypto adapters the partition
needs to have attached.
The special value -1 means all adapters of the desired crypto type in
the CPC.
The C(adapter_names) and C(adapter_count) parameters are mutually
exclusive; if neither is specified the default for C(adapter_count)
applies."
required: false
default: -1
adapter_names:
description:
- "Only for C(state=attach): The names of the crypto adapters the
partition needs to have attached.
The C(adapter_names) and C(adapter_count) parameters are mutually
exclusive; if neither is specified the default for C(adapter_count)
applies."
required: false
default: []
domain_range:
description:
- "Only for C(state=attach): The domain range the partition needs to have
attached, as a tuple of integers (min, max) that specify the inclusive
range of domain index numbers.
Other domains attached to the partition remain unchanged.
The special value -1 for the max item means the maximum supported
domain index number."
required: false
default: (0, -1)
access_mode:
description:
- "Only for C(state=attach): The access mode in which the crypto domains
specified in C(domain_range) need to be attached."
required: false
default: 'usage'
choices: ['usage', 'control']
crypto_type:
description:
- "Only for C(state=attach): The crypto type of the crypto adapters that
will be considered for attaching."
required: false
default: 'ep11'
choices: ['ep11', 'cca', 'acc']
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
required: false
default: null
faked_session:
description:
- "A C(zhmcclient_mock.FakedSession) object that has a mocked HMC set up.
If provided, it will be used instead of connecting to a real HMC. This
is used for testing purposes only."
required: false
default: Real HMC will be used.
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about the crypto configuration of a partition
zhmc_crypto_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_partition_name }}"
state: facts
register: crypto1
- name: Ensure domain 0 on all ep11 adapters is attached in usage mode
zhmc_crypto_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_first_partition_name }}"
state: attached
crypto_type: ep11
adapter_count: -1
domain_range: 0,0
access_mode: usage
- name: Ensure domains 1-max on all ep11 adapters are attached in control mode
zhmc_crypto_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_first_partition_name }}"
state: attached
crypto_type: ep11
adapter_count: -1
domain_range: 1,-1
access_mode: control
- name: Ensure domains 0-max on 1 ep11 adapter are attached in usage mode
zhmc_crypto_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_second_partition_name }}"
state: attached
crypto_type: ep11
adapter_count: 1
domain_range: 0,-1
access_mode: usage
- name: Ensure domains 0-max on two specific ep11 adapters are attached
zhmc_crypto_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
partition_name: "{{ my_second_partition_name }}"
state: attached
crypto_type: ep11
adapter_names: [CRYP00, CRYP01]
domain_range: 0,-1
access_mode: usage
"""
RETURN = """
crypto_configuration:
description:
- "For C(state=detached|attached|facts), a
dictionary with the crypto configuration of the partition after the
changes applied by the module. Key is the partition name, and value
is a dictionary with keys:
- 'adapters': attached adapters, as a dict of key: adapter name, value:
dict of adapter properties;
- 'domain_config': attached domains, as a dict of key: domain index,
value: access mode ('control' or 'usage');
- 'usage_domains': domains attached in usage mode, as a list of domain
index numbers;
- 'control_domains': domains attached in control mode, as a list of
domain index numbers."
returned: success
type: dict
sample: |
C({
"part-1": {
"adapters": {
"adapter 1": {
"type": "crypto",
...
}
},
"domain_config": {
"0": "usage",
"1": "control",
"2": "control"
},
"usage_domains": [0],
"control_domains": [1, 2]
}
})
changes:
description:
- "For C(state=detached|attached|facts), a dictionary with the changes
performed."
returned: success
type: dict
sample: |
C({
"added-adapters": ["adapter 1", "adapter 2"],
"added-domains": ["0", "1"]
})
"""
# Python logger name for this module
LOGGER_NAME = 'zhmc_crypto_attachment'
LOGGER = logging.getLogger(LOGGER_NAME)
# Conversion of crypto types between module parameter values and HMC values
CRYPTO_TYPES_MOD2HMC = {
'acc': 'accelerator',
'cca': 'cca-coprocessor',
'ep11': 'ep11-coprocessor',
}
# Conversion of access modes between module parameter values and HMC values
ACCESS_MODES_MOD2HMC = {
'usage': 'control-usage',
'control': 'control',
}
ACCESS_MODES_HMC2MOD = {
'control-usage': 'usage',
'control': 'control',
}
def get_partition_config(partition, all_adapters):
"""
Return the result of the module by inspecting the current crypto
config. Used for all 'state' parameter values.
Parameters:
partition: Partition object for target partition
all_adapters: List of Adapter objects for all crypto adapters in the CPC
"""
# result items
adapters = dict() # adapter name: adapter properties
domain_config = dict() # domain index: access mode
usage_domains = list() # domains attached in usage mode
control_domains = list() # domains attached in control mode
partition.pull_full_properties() # Make sure it contains the changes
partition_config = partition.get_property('crypto-configuration')
if partition_config:
adapter_uris = partition_config['crypto-adapter-uris']
for a in all_adapters:
if a.uri in adapter_uris:
adapters[a.name] = a.properties
for dc in partition_config['crypto-domain-configurations']:
di = int(dc['domain-index'])
am = ACCESS_MODES_HMC2MOD[dc['access-mode']]
domain_config[di] = am
if am == 'control':
control_domains.append(di)
else:
assert am == 'usage', \
"am={}".format(am)
usage_domains.append(di)
result = dict()
result[partition.name] = dict()
partition_result = result[partition.name]
partition_result['adapters'] = adapters
partition_result['domain_config'] = domain_config
partition_result['usage_domains'] = usage_domains
partition_result['control_domains'] = control_domains
return result
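# Illustrative sketch of the structure returned by get_partition_config()
# (names and values are hypothetical):
#   {'part-1': {'adapters': {'CRYP00': {...adapter properties...}},
#               'domain_config': {0: 'usage', 1: 'control'},
#               'usage_domains': [0],
#               'control_domains': [1]}}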
def get_conflicting_domains(
desired_domains, hmc_access_mode, adapter, partition,
all_crypto_config, all_partitions):
"""
Internal function that determines those domains from the desired domains
on a particular adapter that cannot be attached to a particular partition
in the desired mode because they are already attached to other partitions
in a mode that prevents that.
"""
conflicting_domains = dict()
if adapter.uri in all_crypto_config:
domains_dict = all_crypto_config[adapter.uri]
for di in desired_domains:
if di in domains_dict:
# The domain is already attached to some
# partition(s) in some access mode
for am, p_uri in domains_dict[di]:
if am == 'control':
# An attachment in control mode does not
# prevent additional attachments
continue
if p_uri == partition.uri and \
am == hmc_access_mode:
# This is our target partition, and the
# domain is already attached in the desired
# mode.
continue
p = all_partitions[p_uri]
conflicting_domains[di] = (am, p.name)
return conflicting_domains
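# Illustrative sketch (names are hypothetical): if domain 2 of the given
# adapter is already attached in usage mode to another partition 'part-2',
# the returned dictionary contains:
#   {2: ('control-usage', 'part-2')}
# where 'control-usage' is the HMC representation of the usage access
# mode.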
def ensure_attached(params, check_mode):
"""
Ensure that the specified crypto adapters and crypto domains are attached
to the target partition.
Raises:
ParameterError: An issue with the module parameters.
Error: Other errors during processing.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
# Note: Defaults specified in argument_spec will be set in params dict
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['partition_name']
adapter_count = params.get('adapter_count', None) # No default specified
adapter_names = params.get('adapter_names', None) # No default specified
domain_range = params['domain_range']
access_mode = params['access_mode']
crypto_type = params['crypto_type']
faked_session = params.get('faked_session', None) # No default specified
try:
assert len(domain_range) == 2, \
"len(domain_range)={}".format(len(domain_range))
domain_range_lo = int(domain_range[0])
domain_range_hi = int(domain_range[1])
except (ValueError, AssertionError):
raise ParameterError(
"The 'domain_range' parameter must be a list containing two "
"integer numbers, but is: {!r}".format(domain_range))
hmc_crypto_type = CRYPTO_TYPES_MOD2HMC[crypto_type]
hmc_access_mode = ACCESS_MODES_MOD2HMC[access_mode]
changed = False
result = dict()
result_changes = dict()
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
partition = cpc.partitions.find(name=partition_name)
# The default exception handling is sufficient for the above.
# Determine all crypto adapters of the specified crypto type.
filter_args = {
'adapter-family': 'crypto',
'crypto-type': hmc_crypto_type,
}
all_adapters = cpc.adapters.list(filter_args=filter_args,
full_properties=True)
if not all_adapters:
raise Error("No crypto adapters of type {!r} found on CPC {!r} ".
format(crypto_type, cpc_name))
all_adapters_dict = {a.name: a for a in all_adapters}
# All crypto adapters in a CPC have the same number of domains
# (otherwise the concept of attaching domains across all attached
# adapters cannot work). Therefore, the max number of domains can be
# gathered from any adapter.
max_domains = all_adapters[0].maximum_crypto_domains
# Parameter checking on domain range.
# (can be done only now because it requires the max_domains).
if domain_range_hi == -1:
domain_range_hi = max_domains - 1
if domain_range_lo > domain_range_hi:
raise ParameterError(
"In the 'domain_range' parameter, the lower boundary (={}) "
"of the range must be less than the higher boundary (={})".
format(domain_range_lo, domain_range_hi))
# Parameter checking on adapter count and adapter names.
# (can be done only now because it requires the number of adapters).
if adapter_count is None and adapter_names is None:
adapter_count = -1  # apply the default: all adapters of the crypto type
if adapter_count is not None:
if adapter_names is not None:
raise ParameterError(
"The 'adapter_count' and 'adapter_names' parameters are "
"mutually exclusive, but both have been specified: "
"adapter_count={!r}, adapter_names={!r}".
format(adapter_count, adapter_names))
if adapter_count == -1:
adapter_count = len(all_adapters)
elif adapter_count < 1:
raise ParameterError(
"The 'adapter_count' parameter must be at least 1, but "
"is: {}".
format(adapter_count))
elif adapter_count > len(all_adapters):
raise ParameterError(
"The 'adapter_count' parameter must not exceed the "
"number of {} crypto adapters of type {!r} in CPC {!r}, "
"but is {}".
format(len(all_adapters), crypto_type, cpc_name,
adapter_count))
else:
adapter_count = len(adapter_names)
# Verify the specified adapters exist
if adapter_names is not None:
for aname in adapter_names:
if aname not in all_adapters_dict:
raise ParameterError(
"The 'adapter_name' parameter specifies an adapter "
"named {!r} that does not exist in CPC {!r}".
format(aname, cpc_name))
# At this point, we have:
# - adapter_count is a valid number 1..max in all cases.
# - adapter_names is None if the adapters do not matter or is a
# list of existing adapter names of length adapter_count.
#
# Get current crypto config of the target partition.
#
# Domains attached to the partition, as a dict with:
# key: domain index
# value: access mode
attached_domains = dict()
# Adapters attached to the partition, as a list of Adapter objects:
attached_adapters = list()
# Adapters not attached to the partition, as a list of Adapter objects:
detached_adapters = list()
_attached_adapter_uris = list() # URIs of attached adapters
cc = partition.get_property('crypto-configuration')
if cc:
_attached_adapter_uris = cc['crypto-adapter-uris']
for dc in cc['crypto-domain-configurations']:
di = int(dc['domain-index'])
am = dc['access-mode']
LOGGER.debug(
"Crypto config of partition {!r}: "
"Domain {} is attached in {!r} mode".
format(partition.name, di, am))
attached_domains[di] = am
for a in all_adapters:
if a.uri in _attached_adapter_uris:
LOGGER.debug(
"Crypto config of partition {!r}: "
"Adapter {!r} is attached".
format(partition.name, a.name))
attached_adapters.append(a)
else:
LOGGER.debug(
"Crypto config of partition {!r}: "
"Adapter {!r} is not attached".
format(partition.name, a.name))
detached_adapters.append(a)
del _attached_adapter_uris
#
# Get the current crypto config of all partitions of the CPC.
#
# This is needed because finding out whether an adapter has the right
# domains available by simply attaching it to the target partition
# and reacting to the returned status does not work for stopped
# partitions.
#
# All partitions of the CPC, as a dict:
# key: partition URI
# value: Partition object
all_partitions = cpc.partitions.list()
all_partitions = dict(zip([p.uri for p in all_partitions],
all_partitions))
# Crypto config of all partitions of the CPC, as a dict with:
# key: adapter URI
# value: dict:
# key: domain index (for attached domains)
# value: list of tuple(access mode, partition URI)
all_crypto_config = dict()
for p_uri in all_partitions:
p = all_partitions[p_uri]
cc = p.get_property('crypto-configuration')
# The 'crypto-configuration' property is None or:
# {
# 'crypto-adapter-uris': ['/api/...', ...],
# 'crypto-domain-configurations': [
# {'domain-index': 15, 'access-mode': 'control-usage'},
# ...
# ]
# }
if cc:
_adapter_uris = cc['crypto-adapter-uris']
for dc in cc['crypto-domain-configurations']:
di = int(dc['domain-index'])
am = dc['access-mode']
for a_uri in _adapter_uris:
if a_uri not in all_crypto_config:
all_crypto_config[a_uri] = dict()
domains_dict = all_crypto_config[a_uri] # mutable
if di not in domains_dict:
domains_dict[di] = list()
domains_dict[di].append((am, p.uri))
#
# Determine the domains to be attached to the target partition
#
desired_domains = list(range(domain_range_lo, domain_range_hi + 1))
add_domains = list() # List of domain index numbers to be attached
for di in desired_domains:
if di not in attached_domains:
# This domain is not attached to the target partition
add_domains.append(di)
elif attached_domains[di] != hmc_access_mode:
# This domain is attached to the target partition but not in
# the desired access mode. The access mode could be extended
# from control to control+usage, but that is not implemented
# by this code here.
raise Error(
"Domain {} is currently attached in {!r} mode to target "
"partition {!r}, but requested was {!r} mode".
format(di,
ACCESS_MODES_HMC2MOD[attached_domains[di]],
partition.name, access_mode))
else:
# This domain is attached to the target partition in the
# desired access mode
pass
# Create the domain config structure for the domains to be attached
add_domain_config = list()
for di in add_domains:
add_domain_config.append(
{'domain-index': di,
'access-mode': hmc_access_mode})
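# Illustrative sketch (values are hypothetical): for add_domains = [0, 1]
# in usage mode, add_domain_config becomes:
#   [{'domain-index': 0, 'access-mode': 'control-usage'},
#    {'domain-index': 1, 'access-mode': 'control-usage'}]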
# Check that the domains to be attached to the partition are available
# on the currently attached adapters
for a in attached_adapters:
domains_dict = all_crypto_config[a.uri]
for di in add_domains:
if di in domains_dict:
for am, p_uri in domains_dict[di]:
if am != 'control' and hmc_access_mode != 'control':
# Multiple attachments conflict only when both are
# in usage mode
p = all_partitions[p_uri]
raise Error(
"Domain {} cannot be attached in {!r} mode to "
"target partition {!r} because it is already "
"attached in {!r} mode to partition {!r}".
format(di, access_mode, partition.name,
ACCESS_MODES_HMC2MOD[am], p.name))
# Make sure the desired adapters are attached to the partition
# and the desired domains are attached.
# The HMC enforces the following for non-empty crypto configurations of
# a partition:
# - In the resulting config, the partition needs to have at least one
# adapter attached.
# - In the resulting config, the partition needs to have at least one
# domain attached in usage mode.
# As a result, on an empty crypto config, the first adapter and the
# first domain(s) need to be attached at the same time.
result_changes['added-adapters'] = []
result_changes['added-domains'] = []
if adapter_names is None:
# Only the number of adapters was specified so it can be any
# adapter. We accept any already attached adapter.
missing_count = max(0, adapter_count - len(attached_adapters))
assert missing_count <= len(detached_adapters), \
"missing_count={}, len(detached_adapters)={}".\
format(missing_count, len(detached_adapters))
if missing_count == 0 and add_domain_config:
# Adapters already sufficient, but domains need to be attached
LOGGER.debug(
"Adapters sufficient - attaching domains {!r} in {!r} "
"mode to target partition {!r}".
format(add_domains, access_mode, partition.name))
if not check_mode:
try:
partition.increase_crypto_config([], add_domain_config)
except zhmcclient.Error as exc:
raise Error(
"Attaching domains {!r} in {!r} mode to target "
"partition {!r} failed: {}".
format(add_domains, access_mode, partition.name,
exc))
changed = True
result_changes['added-domains'].extend(add_domains)
elif missing_count > 0:
# Adapters need to be attached
for adapter in detached_adapters:
if missing_count == 0:
break
# Check that the adapter has all needed domains available
conflicting_domains = get_conflicting_domains(
desired_domains, hmc_access_mode, adapter, partition,
all_crypto_config, all_partitions)
if conflicting_domains:
LOGGER.debug(
"Skipping adapter {!r} because the following of "
"its domains are already attached to other "
"partitions: {!r}".
format(adapter.name, conflicting_domains))
continue
LOGGER.debug(
"Attaching adapter {!r} and domains {!r} in {!r} mode "
"to target partition {!r}".
format(adapter.name, add_domains, access_mode,
partition.name))
if not check_mode:
try:
partition.increase_crypto_config(
[adapter], add_domain_config)
except zhmcclient.Error as exc:
raise Error(
"Attaching adapter {!r} and domains {!r} in "
"{!r} mode to target partition {!r} "
"failed: {}".
format(adapter.name, add_domains, access_mode,
partition.name, exc))
changed = True
result_changes['added-adapters'].append(adapter.name)
result_changes['added-domains'].extend(add_domains)
# Don't try to add domains again for next adapter:
add_domain_config = []
add_domains = []
missing_count -= 1
if missing_count > 0:
# Because adapters may be skipped, it is possible that
# there are not enough adapters
raise Error(
"Did not find enough crypto adapters with attachable "
"domains - missing adapters: {}; Requested domains: "
"{}, Access mode: {}".
format(missing_count, desired_domains, access_mode))
else: # adapter_names is not None
# Specific adapters need to be attached. We check already attached
# adapters and add the missing ones. We do not detach adapters
# that are currently attached but not in the input list.
attached_adapter_names = {a.name for a in attached_adapters}
for aname in adapter_names:
if aname not in attached_adapter_names:
adapter = all_adapters_dict[aname]
# Check that the adapter has all needed domains available
conflicting_domains = get_conflicting_domains(
desired_domains, hmc_access_mode, adapter, partition,
all_crypto_config, all_partitions)
if conflicting_domains:
raise Error(
"Crypto adapter {!r} cannot be attached to "
"partition {!r} because the following of "
"its domains are already attached to other "
"partitions in conflicting modes: {!r}".
format(adapter.name, partition.name,
conflicting_domains))
if not check_mode:
try:
partition.increase_crypto_config(
[adapter], add_domain_config)
except zhmcclient.Error as exc:
raise Error(
"Attaching adapter {!r} and domains {!r} in "
"{!r} mode to target partition {!r} "
"failed: {}".
format(adapter.name, add_domains, access_mode,
partition.name, exc))
changed = True
result_changes['added-adapters'].append(adapter.name)
result_changes['added-domains'].extend(add_domains)
# Don't try to add domains again for next adapter:
add_domain_config = []
add_domains = []
if add_domain_config:
# The desired adapters were already attached so the additional
# domains need to be added to the crypto config.
LOGGER.debug(
"Adapters were already attached to target partition {!r} "
"- attaching domains {!r} in {!r} mode".
format(partition.name, add_domains, access_mode))
if not check_mode:
try:
partition.increase_crypto_config(
[], add_domain_config)
except zhmcclient.Error as exc:
raise Error(
"Attaching domains {!r} in {!r} mode to "
"target partition {!r} failed: {}".
format(add_domains, access_mode, partition.name,
exc))
changed = True
result_changes['added-domains'].extend(add_domains)
if not check_mode:
# This is not optimal because it does not produce a result
# in check mode. However, because the result reflects the
# actual configuration (rather than an artificially
# calculated one), it seems better to return no config than
# the unchanged actual config.
result.update(get_partition_config(partition, all_adapters))
return changed, result, result_changes
finally:
session.logoff()
def ensure_detached(params, check_mode):
"""
Ensure that the target partition has no adapters and no domains attached.
Raises:
ParameterError: An issue with the module parameters.
Error: Other errors during processing.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
# Note: Defaults specified in argument_spec will be set in params dict
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['partition_name']
faked_session = params.get('faked_session', None) # No default specified
changed = False
result = dict()
result_changes = dict()
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
partition = cpc.partitions.find(name=partition_name)
# The default exception handling is sufficient for the above.
# Determine all crypto adapters of any crypto type
filter_args = {
'adapter-family': 'crypto',
}
all_adapters = cpc.adapters.list(filter_args=filter_args,
full_properties=True)
cc = partition.get_property('crypto-configuration')
# The 'crypto-configuration' property is None or:
# {
# 'crypto-adapter-uris': ['/api/...', ...],
# 'crypto-domain-configurations': [
# {'domain-index': 15, 'access-mode': 'control-usage'},
# ...
# ]
# }
if cc:
attached_adapter_uris = cc['crypto-adapter-uris']
remove_adapters = []
remove_adapter_names = []
for a in all_adapters:
if a.uri in attached_adapter_uris:
remove_adapters.append(a)
remove_adapter_names.append(a.name)
remove_domains = []
for dc in cc['crypto-domain-configurations']:
di = dc['domain-index']
remove_domains.append(di)
LOGGER.debug(
"Detaching adapters {!r} and domains {!r} from target "
"partition {!r}".
format(remove_adapter_names, remove_domains, partition.name))
if not check_mode:
try:
partition.decrease_crypto_config(
remove_adapters, remove_domains)
except zhmcclient.Error as exc:
raise Error(
"Detaching adapters {!r} and domains {!r} from "
"target partition {!r} failed: {}".
format(remove_adapter_names, remove_domains,
partition.name, exc))
changed = True
result_changes['removed-adapters'] = remove_adapter_names
result_changes['removed-domains'] = remove_domains
if not check_mode:
# This is not optimal because it does not produce a result
# in check mode. However, because the result reflects the
# actual configuration (rather than an artificially
# calculated one), it seems better to return no config than
# the unchanged actual config.
result.update(get_partition_config(partition, all_adapters))
return changed, result, result_changes
finally:
session.logoff()
def facts(params, check_mode):
"""
Return facts about the crypto configuration of the partition.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
partition_name = params['partition_name']
faked_session = params.get('faked_session', None) # No default specified
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
cpc = client.cpcs.find(name=cpc_name)
partition = cpc.partitions.find(name=partition_name)
# The default exception handling is sufficient for the above.
# Determine all crypto adapters of any crypto type
filter_args = {
'adapter-family': 'crypto',
}
all_adapters = cpc.adapters.list(filter_args=filter_args,
full_properties=True)
result = get_partition_config(partition, all_adapters)
return False, result, None
finally:
session.logoff()
def perform_task(params, check_mode):
"""
Perform the task for this module, dependent on the 'state' module
parameter.
If check_mode is True, check whether changes would occur, but don't
actually perform any changes.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
actions = {
"attached": ensure_attached,
"detached": ensure_detached,
"facts": facts,
}
return actions[params['state']](params, check_mode)
def main():
# The following definition of module input parameters must match the
# description of the options in the DOCUMENTATION string.
argument_spec = dict(
hmc_host=dict(required=True, type='str'),
hmc_auth=dict(required=True, type='dict', no_log=True),
cpc_name=dict(required=True, type='str'),
partition_name=dict(required=True, type='str'),
state=dict(required=True, type='str',
choices=['attached', 'detached', 'facts']),
adapter_count=dict(required=False, type='int'),
adapter_names=dict(required=False, type='list'),
domain_range=dict(required=False, type='list', default=[0, -1]),
access_mode=dict(required=False, type='str',
choices=['usage', 'control'], default='usage'),
crypto_type=dict(required=False, type='str',
choices=['ep11', 'cca', 'acc'], default='ep11'),
log_file=dict(required=False, type='str', default=None),
faked_session=dict(required=False, type='object'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
log_file = module.params['log_file']
log_init(LOGGER_NAME, log_file)
_params = dict(module.params)
del _params['hmc_auth']
LOGGER.debug("Module entry: params: {!r}".format(_params))
try:
changed, result, changes = perform_task(
module.params, module.check_mode)
except (Error, zhmcclient.Error) as exc:
# These exceptions are considered errors in the environment or in user
# input. They have a proper message that stands on its own, so we
# simply pass that message on and will not need a traceback.
msg = "{}: {}".format(exc.__class__.__name__, exc)
LOGGER.debug(
"Module exit (failure): msg: {!r}".
format(msg))
module.fail_json(msg=msg)
# Other exceptions are considered module errors and are handled by Ansible
# by showing the traceback.
LOGGER.debug(
"Module exit (success): changed: {!r}, crypto_configuration: {!r}, "
"changes: {!r}".format(changed, result, changes))
module.exit_json(
changed=changed, crypto_configuration=result, changes=changes)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/zhmc_crypto_attachment.py | zhmc_crypto_attachment.py |
from __future__ import absolute_import, print_function
import logging
from ansible.module_utils.basic import AnsibleModule
import requests.packages.urllib3
import zhmcclient
from zhmc_ansible_modules.utils import log_init, Error, \
get_hmc_auth, get_session
# For information on the format of the ANSIBLE_METADATA, DOCUMENTATION,
# EXAMPLES, and RETURN strings, see
# http://docs.ansible.com/ansible/dev_guide/developing_modules_documenting.html
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community',
'shipped_by': 'other',
'other_repo_url': 'https://github.com/zhmcclient/zhmc-ansible-modules'
}
DOCUMENTATION = """
---
module: zhmc_storage_group_attachment
version_added: "0.5"
short_description: Manages the attachment of DPM storage groups to
partitions (with "dpm-storage-management" feature)
description:
- Gathers facts about the attachment of a storage group to a partition.
- Attaches and detaches a storage group to and from a partition.
notes:
- The CPC that is associated with the target storage group must be in the
Dynamic Partition Manager (DPM) operational mode and must have the
"dpm-storage-management" firmware feature enabled.
That feature has been introduced with the z14-ZR1 / Rockhopper II machine
generation.
- This module performs actions only against the Z HMC regarding the
attachment of storage group objects to partitions.
This module does not perform any actions against storage subsystems or
SAN switches.
- The Ansible module zhmc_hba is no longer used on CPCs that have the
"dpm-storage-management" feature enabled.
author:
- Andreas Maier (@andy-maier, [email protected])
- Andreas Scheuring (@scheuran, [email protected])
- Juergen Leopold (@leopoldjuergen, [email protected])
requirements:
- Network access to HMC
- zhmcclient >=0.20.0
- ansible >=2.2.0.0
options:
hmc_host:
description:
- The hostname or IP address of the HMC.
required: true
hmc_auth:
description:
- The authentication credentials for the HMC.
required: true
suboptions:
userid:
description:
- The userid (username) for authenticating with the HMC.
required: true
password:
description:
- The password for authenticating with the HMC.
required: true
cpc_name:
description:
- The name of the CPC that has the partition and is associated with the
storage group.
required: true
storage_group_name:
description:
- The name of the storage group for the attachment.
required: true
partition_name:
description:
- The name of the partition for the attachment.
required: true
state:
description:
- "The desired state for the attachment:"
- "* C(detached): Ensures that the storage group is not attached to the
partition. If the storage group is currently attached to the partition
and the partition is currently active, the module will fail."
- "* C(attached): Ensures that the storage group is attached to the
partition."
- "* C(facts): Does not change anything on the attachment and returns
the attachment status."
required: true
choices: ['detached', 'attached', 'facts']
log_file:
description:
- "File path of a log file to which the logic flow of this module as well
as interactions with the HMC are logged. If null, logging will be
propagated to the Python root logger."
required: false
default: null
faked_session:
description:
- "A C(zhmcclient_mock.FakedSession) object that has a mocked HMC set up.
If provided, it will be used instead of connecting to a real HMC. This
is used for testing purposes only."
required: false
default: Real HMC will be used.
"""
EXAMPLES = """
---
# Note: The following examples assume that some variables named 'my_*' are set.
- name: Gather facts about the attachment
zhmc_storage_group_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
partition_name: "{{ my_partition_name }}"
state: facts
register: sga1
- name: Ensure the storage group is attached to the partition
zhmc_storage_group_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
partition_name: "{{ my_partition_name }}"
state: attached
- name: "Ensure the storage group is not attached to the partition."
zhmc_storage_group_attachment:
hmc_host: "{{ my_hmc_host }}"
hmc_auth: "{{ my_hmc_auth }}"
cpc_name: "{{ my_cpc_name }}"
storage_group_name: "{{ my_storage_group_name }}"
partition_name: "{{ my_partition_name }}"
state: detached
"""
RETURN = """
storage_group_attachment:
description:
- "A dictionary with a single key 'attached' whose boolean value indicates
whether the storage group is now actually attached to the partition.
If check mode was requested, the actual (i.e. not the desired)
attachment state is returned."
returned: success
type: dict
sample: |
C({"attached": true})
"""
# Python logger name for this module
LOGGER_NAME = 'zhmc_storage_group_attachment'
LOGGER = logging.getLogger(LOGGER_NAME)
def ensure_attached(params, check_mode):
"""
Ensure that the storage group is attached to the partition.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
storage_group_name = params['storage_group_name']
partition_name = params['partition_name']
faked_session = params.get('faked_session', None)
changed = False
attached = None
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
console = client.consoles.console
cpc = client.cpcs.find(name=cpc_name)
storage_group = console.storage_groups.find(name=storage_group_name)
partition = cpc.partitions.find(name=partition_name)
# The default exception handling is sufficient for the above.
attached_partitions = storage_group.list_attached_partitions(
name=partition_name)
if not attached_partitions:
# The storage group is detached from the partition
attached = False
if not check_mode:
partition.attach_storage_group(storage_group)
attached = True
changed = True
else:
# The storage group is already attached to the partition
assert len(attached_partitions) == 1
assert attached_partitions[0].name == partition_name
attached = True
result = dict(attached=attached)
return changed, result
finally:
session.logoff()
def ensure_detached(params, check_mode):
"""
Ensure that the storage group is detached from the partition.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
storage_group_name = params['storage_group_name']
partition_name = params['partition_name']
faked_session = params.get('faked_session', None)
changed = False
attached = None
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
console = client.consoles.console
cpc = client.cpcs.find(name=cpc_name)
storage_group = console.storage_groups.find(name=storage_group_name)
partition = cpc.partitions.find(name=partition_name)
# The default exception handling is sufficient for the above.
attached_partitions = storage_group.list_attached_partitions(
name=partition_name)
if attached_partitions:
# The storage group is attached to the partition
assert len(attached_partitions) == 1
assert attached_partitions[0].name == partition_name
attached = True
if not check_mode:
partition.detach_storage_group(storage_group)
attached = False
changed = True
else:
# The storage group is already detached from the partition
attached = False
result = dict(attached=attached)
return changed, result
finally:
session.logoff()
def facts(params, check_mode):
"""
Return facts about the attachment of a storage group to a partition.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
host = params['hmc_host']
userid, password = get_hmc_auth(params['hmc_auth'])
cpc_name = params['cpc_name']
storage_group_name = params['storage_group_name']
partition_name = params['partition_name']
faked_session = params.get('faked_session', None)
changed = False
attached = None
try:
session = get_session(faked_session, host, userid, password)
client = zhmcclient.Client(session)
console = client.consoles.console
cpc = client.cpcs.find(name=cpc_name)
storage_group = console.storage_groups.find(name=storage_group_name)
cpc.partitions.find(name=partition_name)  # check existence
# The default exception handling is sufficient for the above.
attached_partitions = storage_group.list_attached_partitions(
name=partition_name)
if attached_partitions:
# The storage group is attached to the partition
assert len(attached_partitions) == 1
assert attached_partitions[0].name == partition_name
attached = True
else:
# The storage group is not attached to the partition
attached = False
result = dict(attached=attached)
return changed, result
finally:
session.logoff()
def perform_task(params, check_mode):
"""
Perform the task for this module, dependent on the 'state' module
parameter.
If check_mode is True, check whether changes would occur, but don't
actually perform any changes.
Raises:
ParameterError: An issue with the module parameters.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
actions = {
"detached": ensure_detached,
"attached": ensure_attached,
"facts": facts,
}
return actions[params['state']](params, check_mode)
def main():
# The following definition of module input parameters must match the
# description of the options in the DOCUMENTATION string.
argument_spec = dict(
hmc_host=dict(required=True, type='str'),
hmc_auth=dict(required=True, type='dict', no_log=True),
cpc_name=dict(required=True, type='str'),
storage_group_name=dict(required=True, type='str'),
partition_name=dict(required=True, type='str'),
state=dict(required=True, type='str',
choices=['detached', 'attached', 'facts']),
log_file=dict(required=False, type='str', default=None),
faked_session=dict(required=False, type='object'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
log_file = module.params['log_file']
log_init(LOGGER_NAME, log_file)
_params = dict(module.params)
del _params['hmc_auth']
LOGGER.debug("Module entry: params: {!r}".format(_params))
try:
changed, result = perform_task(module.params, module.check_mode)
except (Error, zhmcclient.Error) as exc:
# These exceptions are considered errors in the environment or in user
# input. They have a proper message that stands on its own, so we
# simply pass that message on and will not need a traceback.
msg = "{}: {}".format(exc.__class__.__name__, exc)
LOGGER.debug(
"Module exit (failure): msg: {!r}".
format(msg))
module.fail_json(msg=msg)
# Other exceptions are considered module errors and are handled by Ansible
# by showing the traceback.
LOGGER.debug(
"Module exit (success): changed: {!r}, cpc: {!r}".
format(changed, result))
module.exit_json(changed=changed, storage_group_attachment=result)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
main() | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/zhmc_storage_group_attachment.py | zhmc_storage_group_attachment.py |
import logging
from ansible.module_utils import six
from zhmcclient import Session
from zhmcclient_mock import FakedSession
class Error(Exception):
"""
Abstract base class that serves as a common exception superclass for the
zhmc Ansible module.
"""
pass
class ParameterError(Error):
"""
Indicates an error with the module input parameters.
"""
pass
class StatusError(Error):
"""
Indicates an error with the status of the partition.
"""
pass
# Partition status values that may happen after Partition.start()
START_END_STATUSES = ('active', 'degraded', 'reservation-error')
# Partition status values that may happen after Partition.stop()
STOP_END_STATUSES = ('stopped', 'terminated', 'paused')
# Partition status values that indicate CPC issues
BAD_STATUSES = ('communications-not-active', 'status-check')
def eq_hex(hex_actual, hex_new, prop_name):
"""
Test two hex string values of a property for equality.
"""
if hex_actual:
try:
int_actual = int(hex_actual, 16)
except ValueError:
raise ParameterError(
"Unexpected: Actual value of property {!r} is not a valid hex "
"number: {!r}".
format(prop_name, hex_actual))
else:
int_actual = None
if hex_new:
try:
int_new = int(hex_new, 16)
except ValueError:
raise ParameterError(
"New value for property {!r} is not a valid hex number: {!r}".
format(prop_name, hex_new))
else:
int_new = None
return int_actual == int_new
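# Illustrative examples: the values are compared as integers, so case and
# leading zeros do not matter (the property name below is made up):
#
#   eq_hex('00ff', 'FF', 'example_prop')  -> True
#   eq_hex(None, '0', 'example_prop')     -> False  (None vs. the number 0)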
def _normalized_mac(mac_str):
mac_ints = [int(h, 16) for h in mac_str.split(':')]
mac_str = ':'.join(["%02x" % i for i in mac_ints])
return mac_str
def eq_mac(mac_actual, mac_new, prop_name):
"""
Test two MAC address string values of a property for equality.
"""
if mac_actual:
try:
mac_actual = _normalized_mac(mac_actual)
except ValueError:
raise ParameterError(
"Unexpected: Actual value of property {!r} is not a valid MAC "
"address: {!r}".
format(prop_name, mac_actual))
else:
mac_actual = None
if mac_new:
try:
mac_new = _normalized_mac(mac_new)
except ValueError:
raise ParameterError(
"New value for property {!r} is not a valid MAC address: {!r}".
format(prop_name, mac_new))
else:
mac_new = None
return mac_actual == mac_new
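# Illustrative example: MAC addresses are normalized (lower case, two hex
# digits per byte) before comparison, so these representations are equal:
#
#   eq_mac('00:1A:2B:3C:4D:5E', '0:1a:2b:3c:4d:5e', 'mac_address')  -> True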
def get_hmc_auth(hmc_auth):
"""
Extract HMC userid and password from the 'hmc_auth' module input
parameter.
Parameters:
hmc_auth (dict): value of the 'hmc_auth' module input parameter,
which is a dictionary with items 'userid' and 'password'.
Returns:
tuple(userid, password): A tuple with the respective items
of the input dictionary.
Raises:
ParameterError: An item in the input dictionary was missing.
"""
try:
userid = hmc_auth['userid']
except KeyError:
raise ParameterError("Required item 'userid' is missing in "
"dictionary module parameter 'hmc_auth'.")
try:
password = hmc_auth['password']
except KeyError:
raise ParameterError("Required item 'password' is missing in "
"dictionary module parameter 'hmc_auth'.")
return userid, password
def pull_partition_status(partition):
"""
Retrieve the partition operational status as fast as possible and return
it.
"""
parts = partition.manager.cpc.partitions.list(
filter_args={'name': partition.name})
assert len(parts) == 1
this_part = parts[0]
actual_status = this_part.get_property('status')
return actual_status
def stop_partition(partition, check_mode):
"""
Ensure that the partition is stopped, by influencing the operational
status of the partition, regardless of what its current operational status
is.
If this function returns, the operational status of the partition will be
'stopped'.
Parameters:
partition (zhmcclient.Partition): The partition (must exist, and its
status property is assumed to be current).
check_mode (bool): Indicates whether the playbook was run in check mode,
in which case this method does not actually stop the partition, but
just returns what would have been done.
Returns:
bool: Indicates whether the partition was changed.
Raises:
StatusError: Partition is in one of BAD_STATUSES or did not reach the
'stopped' status despite attempting it.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
changed = False
partition.pull_full_properties()
status = partition.get_property('status')
if status in BAD_STATUSES:
raise StatusError(
"Target CPC {!r} has issues; status of partition {!r} is: {!r}".
format(partition.manager.cpc.name, partition.name, status))
elif status == 'stopped':
pass
elif status == 'starting':
if not check_mode:
# Let it first finish the starting
partition.wait_for_status(START_END_STATUSES)
start_end_status = pull_partition_status(partition)
# Then stop it
partition.stop()
status = pull_partition_status(partition)
if status != 'stopped':
raise StatusError(
"Could not get partition {!r} from {!r} status into "
"'stopped' status after waiting for its starting to "
"complete; current status is: {!r}".
format(partition.name, start_end_status, status))
changed = True
elif status == 'stopping':
if not check_mode:
# Let it finish the stopping
partition.wait_for_status(STOP_END_STATUSES)
stop_end_status = pull_partition_status(partition)
if stop_end_status != 'stopped':
# Make another attempt to stop it
partition.stop()
status = pull_partition_status(partition)
if status != 'stopped':
raise StatusError(
"Could not get partition {!r} from {!r} status into "
"'stopped' status after waiting for its stopping to "
"complete; current status is: {!r}".
format(partition.name, stop_end_status, status))
changed = True
else:
if not check_mode:
previous_status = pull_partition_status(partition)
partition.stop()
status = pull_partition_status(partition)
if status != 'stopped':
raise StatusError(
"Could not get partition {!r} from {!r} status into "
"'stopped' status; current status is: {!r}".
format(partition.name, previous_status, status))
changed = True
return changed
def start_partition(partition, check_mode):
"""
Ensure that the partition is started, by influencing the operational
status of the partition, regardless of what its current operational status
is.
The resulting operational status will be one of START_END_STATUSES.
Parameters:
partition (zhmcclient.Partition): The partition (must exist, and its
status property is assumed to be current).
check_mode (bool): Indicates whether the playbook was run in check mode,
in which case this method does not actually change the partition, but
just returns what would have been done.
Returns:
bool: Indicates whether the partition was changed.
Raises:
StatusError: Partition is in one of BAD_STATUSES or did not reach a
started status despite attempting it.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
changed = False
partition.pull_full_properties()
status = partition.get_property('status')
if status in BAD_STATUSES:
raise StatusError(
"Target CPC {!r} has issues; status of partition {!r} is: {!r}".
format(partition.manager.cpc.name, partition.name, status))
elif status in START_END_STATUSES:
pass
elif status == 'stopping':
if not check_mode:
# Let it first finish the stopping
partition.wait_for_status(STOP_END_STATUSES)
stop_end_status = pull_partition_status(partition)
# Then start it
partition.start()
status = pull_partition_status(partition)
if status not in START_END_STATUSES:
raise StatusError(
"Could not get partition {!r} from {!r} status into "
"a started status after waiting for its stopping to "
"complete; current status is: {!r}".
format(partition.name, stop_end_status, status))
changed = True
elif status == 'starting':
if not check_mode:
# Let it finish the starting
partition.wait_for_status(START_END_STATUSES)
changed = True
else:
if not check_mode:
previous_status = pull_partition_status(partition)
partition.start()
status = pull_partition_status(partition)
if status not in START_END_STATUSES:
raise StatusError(
"Could not get partition {!r} from {!r} status into "
"a started status; current status is: {!r}".
format(partition.name, previous_status, status))
changed = True
return changed
def wait_for_transition_completion(partition):
"""
If the partition is in a transitional state, wait for completion of that
transition. This is required for updating properties.
The resulting operational status will be one of START_END_STATUSES or
STOP_END_STATUSES.
Parameters:
partition (zhmcclient.Partition): The partition (must exist, and its
status property is assumed to be current).
Raises:
StatusError: Partition is in one of BAD_STATUSES.
zhmcclient.Error: Any zhmcclient exception can happen.
"""
partition.pull_full_properties()
status = partition.get_property('status')
if status in BAD_STATUSES:
raise StatusError(
"Target CPC {!r} has issues; status of partition {!r} is: {!r}".
format(partition.manager.cpc.name, partition.name, status))
elif status == 'stopping':
partition.wait_for_status(STOP_END_STATUSES)
elif status == 'starting':
partition.wait_for_status(START_END_STATUSES)
else:
assert status in START_END_STATUSES or status in STOP_END_STATUSES
def get_session(faked_session, host, userid, password):
"""
Return a session object for the HMC.
Parameters:
faked_session (zhmcclient_mock.FakedSession or None):
If this object is a `zhmcclient_mock.FakedSession` object, return that
object.
Else, return a new `zhmcclient.Session` object from the `host`,
`userid`, and `password` arguments.
"""
if isinstance(faked_session, FakedSession):
return faked_session
else:
return Session(host, userid, password)
def to_unicode(value):
"""
Return the input value as a unicode string.
The input value may be any of the following, with the indicated result:
* None -> None
* binary string -> decoded using UTF-8 to unicode string
* unicode string -> unchanged
* list or tuple with items of any of the above -> list with converted items
"""
if isinstance(value, (list, tuple)):
list_uval = []
for val in value:
uval = to_unicode(val)
list_uval.append(uval)
return list_uval
elif isinstance(value, six.binary_type):
return value.decode('utf-8')
elif isinstance(value, six.text_type):
return value
elif value is None:
return None
else:
raise TypeError("Value of {} cannot be converted to unicode: {!r}".
format(type(value), value))
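# Illustrative examples:
#
#   to_unicode(b'abc')              -> u'abc'
#   to_unicode(u'abc')              -> u'abc'
#   to_unicode([b'a', u'b', None])  -> [u'a', u'b', None]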
def process_normal_property(
prop_name, resource_properties, input_props, resource):
"""
Process a normal (= non-artificial) property.
Parameters:
prop_name (string): Property name (using Ansible module names).
resource_properties (dict): Dictionary of property definitions for the
resource type (e.g. ZHMC_PARTITION_PROPERTIES). Each value must be a
tuple (allowed, create, update, update_while_active, eq_func,
type_cast). For details, see the modules using this function.
input_props (dict): New properties.
resource: zhmcclient resource object (e.g. zhmcclient.Partition) with
all properties pulled.
Returns:
tuple of (create_props, update_props, deactivate), where:
* create_props: dict of properties for resource creation.
* update_props: dict of properties for resource update.
* deactivate (bool): Indicates whether the resource needs to be
deactivated because there are properties to be updated that
require that.
Raises:
ParameterError: An issue with the module parameters.
"""
create_props = {}
update_props = {}
deactivate = False
allowed, create, update, update_while_active, eq_func, type_cast = \
resource_properties[prop_name]
# Double check that the property is not a read-only property
assert allowed
assert create or update
hmc_prop_name = prop_name.replace('_', '-')
input_prop_value = input_props[prop_name]
if type_cast:
input_prop_value = type_cast(input_prop_value)
if resource:
# Resource does exist.
current_prop_value = resource.properties.get(hmc_prop_name)
if eq_func:
equal = eq_func(current_prop_value, input_prop_value,
prop_name)
else:
equal = (current_prop_value == input_prop_value)
if not equal:
if update:
update_props[hmc_prop_name] = input_prop_value
if not update_while_active:
deactivate = True
else:
raise ParameterError(
"Property {!r} can be set during {} "
"creation but cannot be updated afterwards "
"(from {!r} to {!r}).".
format(prop_name, resource.__class__.__name__,
current_prop_value, input_prop_value))
else:
# Resource does not exist.
# Prefer setting the property during resource creation.
if create:
create_props[hmc_prop_name] = input_prop_value
else:
update_props[hmc_prop_name] = input_prop_value
if not update_while_active:
deactivate = True
return create_props, update_props, deactivate
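# Illustrative example of a resource_properties dictionary as expected by
# process_normal_property(). The property names and tuple values below are
# made up for illustration and do not describe a real resource type:
#
#   ZHMC_EXAMPLE_PROPERTIES = {
#       # prop_name: (allowed, create, update, update_while_active,
#       #             eq_func, type_cast)
#       'description': (True, True, True, True, None, to_unicode),
#       'boot_mask': (True, True, True, False, eq_hex, None),
#   }
#
# For 'boot_mask', eq_hex() compares the current and new values as hex
# numbers, and update_while_active=False causes deactivate=True to be
# returned when that property needs to be updated on an existing resource.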
def log_init(logger_name, log_file=None):
"""
Set up logging for the loggers of the current Ansible module, and for the
loggers of the underlying zhmcclient package.
The log level of these loggers is set to debug.
If a log file is specified, a log file handler for that log file (with a
log formatter) is created and attached to these loggers.
Parameters:
logger_name (string): Name of the logger to be used for the current
Ansible module.
log_file (string): Path name of a log file to log to, or `None`.
If `None`, logging will be propagated to the Python root logger.
"""
# The datefmt parameter of logging.Formatter() supports the datetime
# formatting placeholders of time.strftime(). Unfortunately, the %f
# placeholder for microseconds is not supported by time.strftime().
# If datefmt=None, the milliseconds are added manually by the
# logging.Formatter() class. So this is a choice between precision and
# indicating the timezone offset.
# The time is in the local timezone.
#
DATEFMT = '%Y-%m-%dT%H:%M:%S%z' # 2019-02-20T10:54:26+0100
# DATEFMT = None # 2019-02-20 10:54:26,123 (= local time)
if log_file:
handler = logging.FileHandler(log_file)
fmt = logging.Formatter(
fmt='%(asctime)s %(levelname)s %(name)s %(process)d %(message)s',
datefmt=DATEFMT)
handler.setFormatter(fmt)
else:
handler = None
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
if handler:
logger.addHandler(handler)
logger = logging.getLogger('zhmcclient.hmc')
logger.setLevel(logging.DEBUG)
if handler:
logger.addHandler(handler)
if False: # Too much gorp, disabled for now
logger = logging.getLogger('zhmcclient.api')
logger.setLevel(logging.DEBUG)
if handler:
logger.addHandler(handler) | zhmc-ansible-modules | /zhmc-ansible-modules-0.8.4.tar.gz/zhmc-ansible-modules-0.8.4/zhmc_ansible_modules/utils/__init__.py | __init__.py |
.. Copyright 2023 IBM Corp. All Rights Reserved.
..
.. Licensed under the Apache License, Version 2.0 (the "License");
.. you may not use this file except in compliance with the License.
.. You may obtain a copy of the License at
..
.. http://www.apache.org/licenses/LICENSE-2.0
..
.. Unless required by applicable law or agreed to in writing, software
.. distributed under the License is distributed on an "AS IS" BASIS,
.. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
.. See the License for the specific language governing permissions and
.. limitations under the License.
IBM Z HMC OS Message Forwarder
==============================
.. image:: https://img.shields.io/pypi/v/zhmc-os-forwarder.svg
:target: https://pypi.python.org/pypi/zhmc-os-forwarder/
:alt: Version on Pypi
.. image:: https://github.com/zhmcclient/zhmc-os-forwarder/workflows/test/badge.svg?branch=master
:target: https://github.com/zhmcclient/zhmc-os-forwarder/actions?query=branch%3Amaster
:alt: Test status (master)
.. image:: https://readthedocs.org/projects/zhmc-os-forwarder/badge/?version=latest
:target: https://readthedocs.org/projects/zhmc-os-forwarder/builds/
:alt: Docs status (master)
.. image:: https://coveralls.io/repos/github/zhmcclient/zhmc-os-forwarder/badge.svg?branch=master
:target: https://coveralls.io/github/zhmcclient/zhmc-os-forwarder?branch=master
:alt: Test coverage (master)
The **IBM Z HMC OS Message Forwarder** connects to the console of operating
systems running in LPARs on Z systems and forwards the messages written by the
operating systems in the LPARs to remote syslog servers.
The Z systems can be in classic or DPM operational mode.
The forwarder attempts to stay up as much as possible: for example, it performs
automatic session renewals with the HMC if the logon session expires, and it
survives HMC reboots and automatically resumes forwarding once the HMC comes
back up, without losing or duplicating any messages.
Documentation
-------------
* `Documentation`_
* `Change log`_
.. _Documentation: https://zhmc-os-forwarder.readthedocs.io/en/stable/
.. _Change log: https://zhmc-os-forwarder.readthedocs.io/en/stable/changes.html
Supported environments
----------------------
* Operating systems: Linux, macOS, Windows
* Python versions: 3.5 and higher
* HMC versions: 2.11.1 and higher
Quickstart
----------
* Install the forwarder and all of its Python dependencies as follows:
.. code-block:: bash
$ pip install zhmc-os-forwarder
* Provide a *config file* for use by the forwarder.
The config file tells the forwarder which HMC to use, and for which CPCs
and LPARs it should forward to which syslog servers.
Download the `Example forwarder config file`_ and edit that copy according
to your needs.
For details, see `Forwarder config file`_.
.. _Example forwarder config file: examples/config_example.yaml
.. _Forwarder config file: https://zhmc-os-forwarder.readthedocs.io/en/stable/usage.html#forwarder-config-file
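  As a rough illustration only, such a config file may look as follows. All
  key names and values shown here are made up and may not match the actual
  schema; see the `Forwarder config file`_ documentation for the real format:

  .. code-block:: yaml

      hmc:
        host: 10.11.12.13
        userid: myuser
        password: mypassword

      forwarding:
        - syslog_server: 10.11.12.14
          cpc: MYCPC
          partitions: ".*"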
* Run the forwarder as follows:
.. code-block:: bash
$ zhmc_os_forwarder -c config.yaml
zhmc_os_forwarder version: 0.2.0
zhmcclient version: 1.10.0
Verbosity level: 0
Opening session with HMC 10.11.12.13 (user: [email protected], certificate validation: False)
Forwarder is up and running (Press Ctrl-C to shut down)
Limitations
-----------
At this point, the forwarder has several limitations. All of them are intended
to be resolved in future releases.
* The forwarder does not recover from HMC restart or connection loss
* Restarting the forwarder will send again all OS messages the HMC has buffered
* New and deleted LPARs in DPM mode are not automatically detected.
Reporting issues
----------------
If you encounter a problem, please report it as an `issue on GitHub`_.
.. _issue on GitHub: https://github.com/zhmcclient/zhmc-os-forwarder/issues
License
-------
This package is licensed under the `Apache 2.0 License`_.
.. _Apache 2.0 License: http://apache.org/licenses/LICENSE-2.0
| zhmc-os-forwarder | /zhmc_os_forwarder-0.2.0.tar.gz/zhmc_os_forwarder-0.2.0/README.rst | README.rst |