docstring | function | __index_level_0__ |
---|---|---|
Widget for displaying detailed noise map.
Args:
backend (IBMQBackend): The backend.
Returns:
GridBox: Widget holding noise map images. | def detailed_map(backend):
props = backend.properties().to_dict()
config = backend.configuration().to_dict()
single_gate_errors = [q['parameters'][0]['value']
for q in props['gates'][2:3*config['n_qubits']:3]]
single_norm = matplotlib.colors.Normalize(
vmin=min(single_gate_errors), vmax=max(single_gate_errors))
q_colors = [cm.viridis(single_norm(err)) for err in single_gate_errors]
cmap = config['coupling_map']
cx_errors = []
for line in cmap:
for item in props['gates'][3*config['n_qubits']:]:
if item['qubits'] == line:
cx_errors.append(item['parameters'][0]['value'])
break
else:
continue
cx_norm = matplotlib.colors.Normalize(
vmin=min(cx_errors), vmax=max(cx_errors))
line_colors = [cm.viridis(cx_norm(err)) for err in cx_errors]
single_widget = widgets.Output(layout=widgets.Layout(display='flex-inline', grid_area='left',
align_items='center'))
cmap_widget = widgets.Output(layout=widgets.Layout(display='flex-inline', grid_area='top',
width='auto', height='auto',
align_items='center'))
cx_widget = widgets.Output(layout=widgets.Layout(display='flex-inline', grid_area='right',
align_items='center'))
tick_locator = mpl.ticker.MaxNLocator(nbins=5)
with cmap_widget:
noise_map = plot_gate_map(backend, qubit_color=q_colors,
line_color=line_colors,
qubit_size=28,
plot_directed=True)
width, height = noise_map.get_size_inches()
noise_map.set_size_inches(1.25*width, 1.25*height)
display(noise_map)
plt.close(noise_map)
with single_widget:
cbl_fig = plt.figure(figsize=(3, 1))
ax1 = cbl_fig.add_axes([0.05, 0.80, 0.9, 0.15])
single_cb = mpl.colorbar.ColorbarBase(ax1, cmap=cm.viridis,
norm=single_norm,
orientation='horizontal')
single_cb.locator = tick_locator
single_cb.update_ticks()
ax1.set_title('Single-qubit U3 error rate')
display(cbl_fig)
plt.close(cbl_fig)
with cx_widget:
cx_fig = plt.figure(figsize=(3, 1))
ax2 = cx_fig.add_axes([0.05, 0.80, 0.9, 0.15])
cx_cb = mpl.colorbar.ColorbarBase(ax2, cmap=cm.viridis,
norm=cx_norm,
orientation='horizontal')
cx_cb.locator = tick_locator
cx_cb.update_ticks()
ax2.set_title('CNOT error rate')
display(cx_fig)
plt.close(cx_fig)
out_box = widgets.GridBox([single_widget, cmap_widget, cx_widget],
layout=widgets.Layout(
grid_template_rows='auto auto',
grid_template_columns='33% 33% 33%',
grid_template_areas='''"top top top" "left . right"''',
grid_gap='0px 0px'))
return out_box | 159,789 |
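A minimal sketch of the error-rate-to-color mapping used in `detailed_map` above; the error values below are made up for illustration (matplotlib assumed installed):

```python
# Map gate error rates onto the viridis colormap via Normalize.
import matplotlib.colors
from matplotlib import cm

gate_errors = [0.0012, 0.0034, 0.0008, 0.0021]  # illustrative, not real backend data
norm = matplotlib.colors.Normalize(vmin=min(gate_errors), vmax=max(gate_errors))
# Each error becomes an RGBA tuple; the smallest error maps to the low
# end of viridis and the largest to the high end.
colors = [cm.viridis(norm(err)) for err in gate_errors]
print(colors[2])  # lowest error -> (0.267004, 0.004874, 0.329415, 1.0)
```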
Widget for displaying job history.
Args:
backend (IBMQBackend): The backend.
Returns:
Tab: A tab widget for history images. | def job_history(backend):
year = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
month = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
week = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
tabs = widgets.Tab(layout=widgets.Layout(max_height='620px'))
tabs.children = [year, month, week]
tabs.set_title(0, 'Year')
tabs.set_title(1, 'Month')
tabs.set_title(2, 'Week')
tabs.selected_index = 1
_build_job_history(tabs, backend)
return tabs | 159,790 |
Plots the job history of the user from the given list of jobs.
Args:
jobs (list): A list of jobs with type IBMQJob.
interval (str): Interval over which to examine.
Returns:
fig: A Matplotlib figure instance. | def plot_job_history(jobs, interval='year'):
def get_date(job):
return datetime.datetime.strptime(job.creation_date(),
'%Y-%m-%dT%H:%M:%S.%fZ')
current_time = datetime.datetime.now()
if interval == 'year':
bins = [(current_time - datetime.timedelta(days=k*365/12))
for k in range(12)]
elif interval == 'month':
bins = [(current_time - datetime.timedelta(days=k)) for k in range(30)]
elif interval == 'week':
bins = [(current_time - datetime.timedelta(days=k)) for k in range(7)]
binned_jobs = [0]*len(bins)
if interval == 'year':
for job in jobs:
for ind, dat in enumerate(bins):
date = get_date(job)
if date.month == dat.month:
binned_jobs[ind] += 1
break
else:
continue
else:
for job in jobs:
for ind, dat in enumerate(bins):
date = get_date(job)
if date.day == dat.day and date.month == dat.month:
binned_jobs[ind] += 1
break
else:
continue
nz_bins = []
nz_idx = []
for ind, val in enumerate(binned_jobs):
if val != 0:
nz_idx.append(ind)
nz_bins.append(val)
total_jobs = sum(binned_jobs)
colors = ['#003f5c', '#ffa600', '#374c80', '#ff764a',
'#7a5195', '#ef5675', '#bc5090']
if interval == 'year':
labels = ['{}-{}'.format(str(bins[b].year)[2:], bins[b].month) for b in nz_idx]
else:
labels = ['{}-{}'.format(bins[b].month, bins[b].day) for b in nz_idx]
fig, ax = plt.subplots(1, 1, figsize=(5, 5)) # pylint: disable=invalid-name
ax.pie(nz_bins[::-1], labels=labels, colors=colors, textprops={'fontsize': 14},
rotatelabels=True, counterclock=False)
ax.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))
ax.text(0, 0, total_jobs, horizontalalignment='center',
verticalalignment='center', fontsize=26)
fig.tight_layout()
return fig | 159,792 |
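The donut effect in `plot_job_history` is a pie chart with a white circle drawn over its center; a minimal sketch with made-up bin counts:

```python
# Donut chart: pie + white circle + centered total.
import matplotlib.pyplot as plt
from matplotlib.patches import Circle

bins = [12, 7, 3, 9]  # illustrative job counts per period
fig, ax = plt.subplots(figsize=(5, 5))
ax.pie(bins, labels=['Jan', 'Feb', 'Mar', 'Apr'], counterclock=False)
ax.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))
ax.text(0, 0, sum(bins), horizontalalignment='center',
        verticalalignment='center', fontsize=26)
plt.show()
```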
Create new sample pulse command.
Args:
samples (ndarray): Complex array of pulse envelope.
name (str): Unique name to identify the pulse.
Raises:
PulseError: when pulse envelope amplitude exceeds 1. | def __init__(self, samples, name=None):
super().__init__(duration=len(samples), name=name)
if np.any(np.abs(samples) > 1):
raise PulseError('Absolute value of pulse envelope amplitude exceeds 1.')
self._samples = samples | 159,794 |
Two SamplePulses are the same if they are of the same type
and have the same name and samples.
Args:
other (SamplePulse): other SamplePulse
Returns:
bool: are self and other equal. | def __eq__(self, other):
if super().__eq__(other) and \
(self._samples == other._samples).all():
return True
return False | 159,796 |
Select a PassManager and run a single circuit through it.
Args:
circuit_config_tuple (tuple):
circuit (QuantumCircuit): circuit to transpile
transpile_config (TranspileConfig): configuration dictating how to transpile
Returns:
QuantumCircuit: transpiled circuit | def _transpile_circuit(circuit_config_tuple):
circuit, transpile_config = circuit_config_tuple
# if the pass manager is not already selected, choose an appropriate one.
if transpile_config.pass_manager:
pass_manager = transpile_config.pass_manager
elif transpile_config.coupling_map:
pass_manager = default_pass_manager(transpile_config.basis_gates,
transpile_config.coupling_map,
transpile_config.initial_layout,
transpile_config.seed_transpiler)
else:
pass_manager = default_pass_manager_simulator(transpile_config.basis_gates)
return pass_manager.run(circuit) | 159,804 |
Two Acquires are the same if they are of the same type
and have the same kernel and discriminator.
Args:
other (Acquire): Other Acquire
Returns:
bool: are self and other equal. | def __eq__(self, other):
if type(self) is type(other) and \
self.kernel == other.kernel and \
self.discriminator == other.discriminator:
return True
return False | 159,814 |
Two physical qubits are the same if they have the same index and channels.
Args:
other (Qubit): other Qubit
Returns:
bool: are self and other equal. | def __eq__(self, other):
# pylint: disable=too-many-boolean-expressions
if (type(self) is type(other) and
self._index == other._index and
self._drives == other._drives and
self._controls == other._controls and
self._measures == other._measures and
self._acquires == other._acquires):
return True
return False | 159,820 |
Assembles a list of circuits into a qobj which can be run on the backend.
Args:
circuits (list[QuantumCircuit]): circuit(s) to assemble
qobj_id (int): identifier for the generated qobj
qobj_header (QobjHeader): header to pass to the results
run_config (RunConfig): configuration of the runtime environment
Returns:
QasmQobj: the Qobj to be run on the backends | def assemble_circuits(circuits, qobj_id=None, qobj_header=None, run_config=None):
qobj_config = QasmQobjConfig()
if run_config:
qobj_config = QasmQobjConfig(**run_config.to_dict())
# Pack everything into the Qobj
experiments = []
max_n_qubits = 0
max_memory_slots = 0
for circuit in circuits:
# header stuff
n_qubits = 0
memory_slots = 0
qubit_labels = []
clbit_labels = []
qreg_sizes = []
creg_sizes = []
for qreg in circuit.qregs:
qreg_sizes.append([qreg.name, qreg.size])
for j in range(qreg.size):
qubit_labels.append([qreg.name, j])
n_qubits += qreg.size
for creg in circuit.cregs:
creg_sizes.append([creg.name, creg.size])
for j in range(creg.size):
clbit_labels.append([creg.name, j])
memory_slots += creg.size
# TODO: why do we need creq_sizes and qreg_sizes in header
# TODO: we need to rethink memory_slots as they are tied to classical bit
experimentheader = QobjExperimentHeader(qubit_labels=qubit_labels,
n_qubits=n_qubits,
qreg_sizes=qreg_sizes,
clbit_labels=clbit_labels,
memory_slots=memory_slots,
creg_sizes=creg_sizes,
name=circuit.name)
# TODO: why do we need n_qubits and memory_slots in both the header and the config
experimentconfig = QasmQobjExperimentConfig(n_qubits=n_qubits, memory_slots=memory_slots)
# Convert conditionals from QASM-style (creg ?= int) to qobj-style
# (register_bit ?= 1), by assuming device has unlimited register slots
# (supported only for simulators). Map all measures to a register matching
# their clbit_index, create a new register slot for every conditional gate
# and add a bfunc to map the creg=val mask onto the gating register bit.
is_conditional_experiment = any(op.control for (op, qargs, cargs) in circuit.data)
max_conditional_idx = 0
instructions = []
for op_context in circuit.data:
instruction = op_context[0].assemble()
# Add register attributes to the instruction
qargs = op_context[1]
cargs = op_context[2]
if qargs:
qubit_indices = [qubit_labels.index([qubit[0].name, qubit[1]])
for qubit in qargs]
instruction.qubits = qubit_indices
if cargs:
clbit_indices = [clbit_labels.index([clbit[0].name, clbit[1]])
for clbit in cargs]
instruction.memory = clbit_indices
# If the experiment has conditional instructions, assume every
# measurement result may be needed for a conditional gate.
if instruction.name == "measure" and is_conditional_experiment:
instruction.register = clbit_indices
# To convert to a qobj-style conditional, insert a bfunc prior
# to the conditional instruction to map the creg ?= val condition
# onto a gating register bit.
if hasattr(instruction, '_control'):
ctrl_reg, ctrl_val = instruction._control
mask = 0
val = 0
for clbit in clbit_labels:
if clbit[0] == ctrl_reg.name:
mask |= (1 << clbit_labels.index(clbit))
val |= (((ctrl_val >> clbit[1]) & 1) << clbit_labels.index(clbit))
conditional_reg_idx = memory_slots + max_conditional_idx
conversion_bfunc = QasmQobjInstruction(name='bfunc',
mask="0x%X" % mask,
relation='==',
val="0x%X" % val,
register=conditional_reg_idx)
instructions.append(conversion_bfunc)
instruction.conditional = conditional_reg_idx
max_conditional_idx += 1
# Delete control attribute now that we have replaced it with
# the conditional and bfunc
del instruction._control
instructions.append(instruction)
experiments.append(QasmQobjExperiment(instructions=instructions, header=experimentheader,
config=experimentconfig))
if n_qubits > max_n_qubits:
max_n_qubits = n_qubits
if memory_slots > max_memory_slots:
max_memory_slots = memory_slots
qobj_config.memory_slots = max_memory_slots
qobj_config.n_qubits = max_n_qubits
return QasmQobj(qobj_id=qobj_id,
config=qobj_config,
experiments=experiments,
header=qobj_header) | 159,826 |
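A worked example of the mask/val computation in the conditional-conversion loop above, assuming a single hypothetical 3-bit register `c` and the condition `c == 5` (binary 101):

```python
# clbit_labels as built by assemble_circuits for one 3-bit register 'c'.
clbit_labels = [['c', 0], ['c', 1], ['c', 2]]
ctrl_reg_name, ctrl_val = 'c', 5  # condition: c == 5 (0b101)
mask = 0
val = 0
for clbit in clbit_labels:
    if clbit[0] == ctrl_reg_name:
        mask |= (1 << clbit_labels.index(clbit))
        val |= (((ctrl_val >> clbit[1]) & 1) << clbit_labels.index(clbit))
print("mask=0x%X val=0x%X" % (mask, val))  # mask=0x7 val=0x5
```

The mask selects every bit of `c`, and `val` re-encodes 5 onto those bit positions, matching the `bfunc` fields `mask="0x7"`, `val="0x5"`.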
Assembles a list of schedules into a qobj which can be run on the backend.
Args:
schedules (list[Schedule]): schedules to assemble
qobj_id (int): identifier for the generated qobj
qobj_header (QobjHeader): header to pass to the results
run_config (RunConfig): configuration of the runtime environment
Returns:
PulseQobj: the Qobj to be run on the backends
Raises:
QiskitError: when invalid schedules or configs are provided | def assemble_schedules(schedules, qobj_id=None, qobj_header=None, run_config=None):
qobj_config = QasmQobjConfig()
if run_config:
qobj_config = QasmQobjConfig(**run_config.to_dict())
# Get appropriate convertors
instruction_converter = PulseQobjConverter
instruction_converter = instruction_converter(PulseQobjInstruction, **run_config.to_dict())
lo_converter = LoConfigConverter(PulseQobjExperimentConfig, run_config.qubit_lo_freq,
run_config.meas_lo_freq, **run_config.to_dict())
# Pack everything into the Qobj
qobj_schedules = []
user_pulselib = set()
for idx, schedule in enumerate(schedules):
# instructions
qobj_instructions = []
# Instructions are returned as tuple of shifted time and instruction
for shift, instruction in schedule.instructions:
# TODO: support conditional gate
qobj_instructions.append(instruction_converter(shift, instruction))
if isinstance(instruction, PulseInstruction):
# add samples to pulse library
user_pulselib.add(instruction.command)
# experiment header
qobj_experiment_header = QobjExperimentHeader(
name=schedule.name or 'Experiment-%d' % idx
)
qobj_schedules.append({
'header': qobj_experiment_header,
'instructions': qobj_instructions
})
# setup pulse_library
run_config.pulse_library = [QobjPulseLibrary(name=pulse.name, samples=pulse.samples)
for pulse in user_pulselib]
# create qobj experiment field
experiments = []
if len(run_config.schedule_los) == 1:
lo_dict = run_config.schedule_los.pop()
# update global config
q_los = lo_converter.get_qubit_los(lo_dict)
if q_los:
run_config.qubit_lo_freq = q_los
m_los = lo_converter.get_meas_los(lo_dict)
if m_los:
run_config.meas_lo_freq = m_los
if run_config.schedule_los:
# multiple frequency setups
if len(qobj_schedules) == 1:
# frequency sweep
for lo_dict in run_config.schedule_los:
experiments.append(PulseQobjExperiment(
instructions=qobj_schedules[0]['instructions'],
experimentheader=qobj_schedules[0]['header'],
experimentconfig=lo_converter(lo_dict)
))
elif len(qobj_schedules) == len(run_config.schedule_los):
# n:n setup
for lo_dict, schedule in zip(run_config.schedule_los, qobj_schedules):
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
experimentheader=schedule['header'],
experimentconfig=lo_converter(lo_dict)
))
else:
raise QiskitError('Invalid LO setting is specified. '
'The LO should be configured for each schedule, or '
'single setup for all schedules (unique), or '
'multiple setups for a single schedule (frequency sweep),'
'or no LO configured at all.')
else:
# unique frequency setup
for schedule in qobj_schedules:
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
experimentheader=schedule['header'],
))
qobj_config = PulseQobjConfig(**run_config.to_dict())
return PulseQobj(qobj_id=qobj_id,
config=qobj_config,
experiments=experiments,
header=qobj_header) | 159,827 |
Create a hinton representation.
Graphical representation of the input array using a 2D city style
graph (hinton).
Args:
rho (array): Density matrix
figsize (tuple): Figure size in pixels. | def iplot_state_hinton(rho, figsize=None):
# HTML
html_template = Template()
# JavaScript
javascript_template = Template()
rho = _validate_input_state(rho)
if figsize is None:
options = {}
else:
options = {'width': figsize[0], 'height': figsize[1]}
# Process data and execute
div_number = str(time.time())
div_number = re.sub('[.]', '', div_number)
# Process data and execute
real = []
imag = []
for xvalue in rho:
row_real = []
col_imag = []
for value_real in xvalue.real:
row_real.append(float(value_real))
real.append(row_real)
for value_imag in xvalue.imag:
col_imag.append(float(value_imag))
imag.append(col_imag)
html = html_template.substitute({
'divNumber': div_number
})
javascript = javascript_template.substitute({
'divNumber': div_number,
'executions': [{'data': real}, {'data': imag}],
'options': options
})
display(HTML(html + javascript)) | 159,833 |
Create new measurement options.
Parameters:
name (str): Name of measurement option to be used. | def __init__(self, name=None, **params):
self._name = name
self._params = params | 159,834 |
Two measurement options are the same if they are of the same type
and have the same name and params.
Args:
other (MeasOpts): Other Discriminator/Kernel.
Returns:
bool: are self and other equal. | def __eq__(self, other):
if type(self) is type(other) and \
self._name == other._name and \
self._params == other._params:
return True
return False | 159,835 |
Create device specification with specified `qubits`.
Args:
qubits (List[Qubit]): qubits in the device
registers (List[RegisterSlot]): classical register slots
mem_slots (List[MemorySlot]): memory slots | def __init__(self,
qubits: List[Qubit],
registers: List[RegisterSlot],
mem_slots: List[MemorySlot]):
self._qubits = qubits
self._reg_slots = registers
self._mem_slots = mem_slots | 159,845 |
Create device specification with values in backend configuration.
Args:
backend(Backend): backend configuration
Returns:
DeviceSpecification: created device specification
Raises:
PulseError: when an invalid backend is specified | def create_from(cls, backend):
backend_config = backend.configuration()
# TODO : Remove usage of config.defaults when backend.defaults() is updated.
try:
backend_default = backend.defaults()
except ModelValidationError:
from collections import namedtuple
BackendDefault = namedtuple('BackendDefault', ('qubit_freq_est', 'meas_freq_est'))
backend_default = BackendDefault(
qubit_freq_est=backend_config.defaults['qubit_freq_est'],
meas_freq_est=backend_config.defaults['meas_freq_est']
)
# system size
n_qubits = backend_config.n_qubits
n_registers = backend_config.n_registers
n_uchannels = backend_config.n_uchannels
if n_uchannels > 0 and n_uchannels != n_qubits:
raise PulseError("This version assumes no U-channels or #U-channels==#qubits.")
# frequency information
qubit_lo_freqs = backend_default.qubit_freq_est
qubit_lo_ranges = backend_config.qubit_lo_range
meas_lo_freqs = backend_default.meas_freq_est
meas_lo_ranges = backend_config.meas_lo_range
# generate channels with assuming their numberings are aligned with qubits
drives = [
DriveChannel(i, qubit_lo_freqs[i], tuple(qubit_lo_ranges[i]))
for i in range(n_qubits)
]
measures = [
MeasureChannel(i, meas_lo_freqs[i], tuple(meas_lo_ranges[i]))
for i in range(n_qubits)
]
acquires = [AcquireChannel(i) for i in range(n_qubits)]
controls = [ControlChannel(i) for i in range(n_uchannels)]
qubits = []
for i in range(n_qubits):
# TODO: get qubits <-> channels relationship from backend
qubit = Qubit(i,
drive_channels=[drives[i]],
control_channels=None if n_uchannels == 0 else controls[i],
measure_channels=[measures[i]],
acquire_channels=[acquires[i]])
qubits.append(qubit)
registers = [RegisterSlot(i) for i in range(n_registers)]
# TODO: get #mem_slots from backend
mem_slots = [MemorySlot(i) for i in range(len(qubits))]
return DeviceSpecification(qubits, registers, mem_slots) | 159,846 |
Two device specs are the same if they have the same qubits.
Args:
other (DeviceSpecification): other DeviceSpecification
Returns:
bool: are self and other equal. | def __eq__(self, other):
if type(self) is type(other) and \
self._qubits == other._qubits:
return True
return False | 159,847 |
Map each qubit in block_qargs to its wire position among the block's wires.
Args:
block_qargs (list): list of qubits that a block acts on
global_index_map (dict): mapping from each qubit in the
circuit to its wire position within that circuit
Returns:
dict: mapping from qarg to position in block | def _block_qargs_to_indices(self, block_qargs, global_index_map):
block_indices = [global_index_map[q] for q in block_qargs]
ordered_block_indices = sorted(block_indices)
block_positions = {q: ordered_block_indices.index(global_index_map[q])
for q in block_qargs}
return block_positions | 159,849 |
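A standalone sketch of `_block_qargs_to_indices`, with plain strings standing in for qubits and a made-up global index map:

```python
# Same logic as the method above, detached from the pass class.
def block_qargs_to_indices(block_qargs, global_index_map):
    block_indices = [global_index_map[q] for q in block_qargs]
    ordered_block_indices = sorted(block_indices)
    return {q: ordered_block_indices.index(global_index_map[q])
            for q in block_qargs}

global_map = {'q0': 0, 'q1': 1, 'q2': 2, 'q3': 3}
# A block acting on wires 3 and 1 sees them as local positions 1 and 0.
print(block_qargs_to_indices(['q3', 'q1'], global_map))  # {'q3': 1, 'q1': 0}
```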
Converter decorator method.
Pulse instruction converter is defined for each instruction type,
and this decorator binds converter function to valid instruction type.
Args:
type_instruction (Instruction): valid pulse instruction class to the converter. | def __call__(self, type_instruction):
# pylint: disable=missing-return-doc, missing-return-type-doc
def _apply_converter(converter):
@functools.wraps(converter)
def _call_valid_converter(self, shift, instruction):
if isinstance(instruction, type_instruction):
return converter(self, shift, instruction)
else:
raise PulseError('Supplied instruction {0} '
'is not of type {1}.'.format(instruction, type_instruction))
# Track conversion methods for class.
self._bound_instructions[type_instruction] = _call_valid_converter
return _call_valid_converter
return _apply_converter | 159,850 |
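A simplified, runnable sketch of this type-binding decorator pattern, using plain types instead of pulse instructions (all names here are made up):

```python
import functools

class MethodBinder:
    """Registers converter methods keyed by the instruction type they accept."""
    def __init__(self):
        self._bound = {}

    def __call__(self, type_instruction):
        def _apply(converter):
            @functools.wraps(converter)
            def _valid(conv_self, shift, instruction):
                if isinstance(instruction, type_instruction):
                    return converter(conv_self, shift, instruction)
                raise TypeError('unsupported instruction type')
            self._bound[type_instruction] = _valid
            return _valid
        return _apply

bind = MethodBinder()

class Converter:
    @bind(int)
    def convert_int(self, shift, instruction):
        return shift + instruction

print(Converter().convert_int(10, 5))  # 15
print(int in bind._bound)              # True
```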
Create new converter.
Args:
qobj_model (QobjInstruction): marshmallow model to serialize to object.
run_config (dict): experimental configuration. | def __init__(self, qobj_model, **run_config):
self._qobj_model = qobj_model
self._run_config = run_config | 159,852 |
Return converted `AcquireInstruction`.
Args:
shift(int): Offset time.
instruction (AcquireInstruction): acquire instruction.
Returns:
dict: Dictionary of required parameters. | def convert_acquire(self, shift, instruction):
meas_level = self._run_config.get('meas_level', 2)
command_dict = {
'name': 'acquire',
't0': shift+instruction.start_time,
'duration': instruction.duration,
'qubits': [q.index for q in instruction.acquires],
'memory_slot': [m.index for m in instruction.mem_slots]
}
if meas_level == 2:
# setup discriminators
if instruction.command.discriminator:
command_dict.update({
'discriminators': [
QobjMeasurementOption(
name=instruction.command.discriminator.name,
params=instruction.command.discriminator.params)
]
})
# setup register_slots
command_dict.update({
'register_slot': [regs.index for regs in instruction.reg_slots]
})
if meas_level >= 1:
# setup kernels
if instruction.command.kernel:
command_dict.update({
'kernels': [
QobjMeasurementOption(
name=instruction.command.kernel.name,
params=instruction.command.kernel.params)
]
})
return self._qobj_model(**command_dict) | 159,854 |
Return converted `FrameChangeInstruction`.
Args:
shift(int): Offset time.
instruction (FrameChangeInstruction): frame change instruction.
Returns:
dict: Dictionary of required parameters. | def convert_frame_change(self, shift, instruction):
command_dict = {
'name': 'fc',
't0': shift+instruction.start_time,
'ch': instruction.channels[0].name,
'phase': instruction.command.phase
}
return self._qobj_model(**command_dict) | 159,855 |
Return converted `PersistentValueInstruction`.
Args:
shift(int): Offset time.
instruction (PersistentValueInstruction): persistent value instruction.
Returns:
dict: Dictionary of required parameters. | def convert_persistent_value(self, shift, instruction):
command_dict = {
'name': 'pv',
't0': shift+instruction.start_time,
'ch': instruction.channels[0].name,
'val': instruction.command.value
}
return self._qobj_model(**command_dict) | 159,856 |
Return converted `PulseInstruction`.
Args:
shift(int): Offset time.
instruction (PulseInstruction): drive instruction.
Returns:
dict: Dictionary of required parameters. | def convert_drive(self, shift, instruction):
command_dict = {
'name': instruction.command.name,
't0': shift+instruction.start_time,
'ch': instruction.channels[0].name
}
return self._qobj_model(**command_dict) | 159,857 |
Return converted `Snapshot`.
Args:
shift(int): Offset time.
instruction (Snapshot): snapshot instruction.
Returns:
dict: Dictionary of required parameters. | def convert_snapshot(self, shift, instruction):
command_dict = {
'name': 'snapshot',
't0': shift+instruction.start_time,
'label': instruction.name,
'type': instruction.type
}
return self._qobj_model(**command_dict) | 159,858 |
Update annotations of discretized continuous pulse function with duration.
Args:
discretized_pulse: Discretized decorated continuous pulse. | def _update_annotations(discretized_pulse: Callable) -> Callable:
undecorated_annotations = list(discretized_pulse.__annotations__.items())
decorated_annotations = undecorated_annotations[1:]
decorated_annotations.insert(0, ('duration', int))
discretized_pulse.__annotations__ = dict(decorated_annotations)
return discretized_pulse | 159,859 |
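A small demo of what `_update_annotations` does to a function's `__annotations__` mapping; the `gaussian` signature below is hypothetical:

```python
def gaussian(times: 'ndarray', amp: complex, sigma: float) -> 'ndarray':
    ...

# Replace the first parameter's annotation with ('duration', int),
# exactly as in the helper above.
annotations = list(gaussian.__annotations__.items())
gaussian.__annotations__ = dict([('duration', int)] + annotations[1:])
print(gaussian.__annotations__)
# {'duration': <class 'int'>, 'amp': <class 'complex'>,
#  'sigma': <class 'float'>, 'return': 'ndarray'}
```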
Update annotations of discretized continuous pulse function.
Args:
discretized_pulse: Discretized decorated continuous pulse.
sampler_inst: Applied sampler. | def _update_docstring(discretized_pulse: Callable, sampler_inst: Callable) -> Callable:
wrapped_docstring = pydoc.render_doc(discretized_pulse, '%s')
header, body = wrapped_docstring.split('\n', 1)
body = textwrap.indent(body, ' ')
wrapped_docstring = header+body
updated_ds = """Discretized continuous pulse function: `{continuous_name}` using sampler: `{sampler_name}`.

{continuous_doc}
""".format(continuous_name=discretized_pulse.__name__,
sampler_name=sampler_inst.__name__,
continuous_doc=wrapped_docstring)
discretized_pulse.__doc__ = updated_ds
return discretized_pulse | 159,860 |
Build a ``QuantumCircuit`` object from a ``DAGCircuit``.
Args:
dag (DAGCircuit): the input dag.
Return:
QuantumCircuit: the circuit representing the input dag. | def dag_to_circuit(dag):
qregs = collections.OrderedDict()
for qreg in dag.qregs.values():
qreg_tmp = QuantumRegister(qreg.size, name=qreg.name)
qregs[qreg.name] = qreg_tmp
cregs = collections.OrderedDict()
for creg in dag.cregs.values():
creg_tmp = ClassicalRegister(creg.size, name=creg.name)
cregs[creg.name] = creg_tmp
name = dag.name or None
circuit = QuantumCircuit(*qregs.values(), *cregs.values(), name=name)
for node in dag.topological_op_nodes():
qubits = []
for qubit in node.qargs:
qubits.append(qregs[qubit[0].name][qubit[1]])
clbits = []
for clbit in node.cargs:
clbits.append(cregs[clbit[0].name][clbit[1]])
# Get arguments for classical control (if any)
if node.condition is None:
control = None
else:
control = (node.condition[0], node.condition[1])
inst = node.op.copy()
inst.control = control
circuit.append(inst, qubits, clbits)
return circuit | 159,864 |
Two FrameChanges are the same if they are of the same type
and have the same phase.
Args:
other (FrameChange): other FrameChange
Returns:
bool: are self and other equal. | def __eq__(self, other):
if type(self) is type(other) and \
self.phase == other.phase:
return True
return False | 159,867 |
Return a basis state ndarray.
Args:
str_state (string): a string representing the state.
num (int): the number of qubits
Returns:
ndarray: state(2**num), a quantum state with the given basis state.
Raises:
QiskitError: if the dimensions are wrong | def basis_state(str_state, num):
n = int(str_state, 2)
if num >= len(str_state):
state = np.zeros(1 << num, dtype=complex)
state[n] = 1
return state
else:
raise QiskitError('size of bitstring is greater than num.') | 159,921 |
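A usage sketch, assuming `basis_state` above is in scope: the bitstring '010' is 2 in binary, so the unit amplitude lands at index 2 of an 8-dimensional statevector.

```python
import numpy as np

state = basis_state('010', 3)
print(state.shape)                 # (8,)
print(int(np.argmax(state.real)))  # 2
print(state[2])                    # (1+0j)
```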
Maps a pure state to a state matrix.
Args:
state (ndarray): pure state vector
flatten (bool): determine whether to return the column-stacked state matrix
Returns:
ndarray: state_mat(2**num, 2**num) if flatten is false
ndarray: state_mat(4**num) if flatten is true, stacked column by column | def projector(state, flatten=False):
density_matrix = np.outer(state.conjugate(), state)
if flatten:
return density_matrix.flatten(order='F')
return density_matrix | 159,922 |
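A usage sketch, assuming `projector` above is in scope: projecting the pure state (|0> + |1>)/sqrt(2) yields a rank-1 density matrix with every entry 0.5.

```python
import numpy as np

psi = np.array([1, 1]) / np.sqrt(2)
print(projector(psi))
# [[0.5 0.5]
#  [0.5 0.5]]
print(projector(psi, flatten=True).shape)  # (4,) column-stacked
```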
Calculate the purity of a quantum state.
Args:
state (ndarray): a quantum state
Returns:
float: purity. | def purity(state):
rho = np.array(state)
if rho.ndim == 1:
return 1.0
return np.real(np.trace(rho.dot(rho))) | 159,923 |
Flips the cx nodes to match the directed coupling map.
Args:
dag (DAGCircuit): DAG to map.
Returns:
DAGCircuit: The rearranged dag for the coupling map
Raises:
TranspilerError: If the circuit cannot be mapped just by flipping the
cx nodes. | def run(self, dag):
new_dag = DAGCircuit()
if self.layout is None:
# LegacySwap renames the register in the DAG and does not match the property set
self.layout = Layout.generate_trivial_layout(*dag.qregs.values())
for layer in dag.serial_layers():
subdag = layer['graph']
for cnot_node in subdag.named_nodes('cx', 'CX'):
control = cnot_node.qargs[0]
target = cnot_node.qargs[1]
physical_q0 = self.layout[control]
physical_q1 = self.layout[target]
if self.coupling_map.distance(physical_q0, physical_q1) != 1:
raise TranspilerError('The circuit requires a connection between physical '
'qubits %s and %s' % (physical_q0, physical_q1))
if (physical_q0, physical_q1) not in self.coupling_map.get_edges():
# A flip needs to be done
# Create the involved registers
if control[0] not in subdag.qregs.values():
subdag.add_qreg(control[0])
if target[0] not in subdag.qregs.values():
subdag.add_qreg(target[0])
# Add H gates around
subdag.apply_operation_back(HGate(), [target], [])
subdag.apply_operation_back(HGate(), [control], [])
subdag.apply_operation_front(HGate(), [target], [])
subdag.apply_operation_front(HGate(), [control], [])
# Flips the CX
cnot_node.qargs[0], cnot_node.qargs[1] = target, control
new_dag.extend_back(subdag)
return new_dag | 159,937 |
Run one pass of cx cancellation on the circuit
Args:
dag (DAGCircuit): the directed acyclic graph to run on.
Returns:
DAGCircuit: Transformed DAG. | def run(self, dag):
cx_runs = dag.collect_runs(["cx"])
for cx_run in cx_runs:
# Partition the cx_run into chunks with equal gate arguments
partition = []
chunk = []
for i in range(len(cx_run) - 1):
chunk.append(cx_run[i])
qargs0 = cx_run[i].qargs
qargs1 = cx_run[i + 1].qargs
if qargs0 != qargs1:
partition.append(chunk)
chunk = []
chunk.append(cx_run[-1])
partition.append(chunk)
# Simplify each chunk in the partition
for chunk in partition:
if len(chunk) % 2 == 0:
for n in chunk:
dag.remove_op_node(n)
else:
for n in chunk[1:]:
dag.remove_op_node(n)
return dag | 159,938 |
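A standalone sketch of the cancellation rule in this pass: within a run of CX gates, consecutive gates on the same qubit pair cancel pairwise, so even-length chunks vanish and odd-length chunks keep one gate. Qubit pairs are plain tuples here.

```python
def cancel_cx_run(qarg_run):
    # qarg_run: one qubit pair per CX gate, e.g. [(0, 1), (0, 1), (1, 2)]
    partition, chunk = [], []
    for i in range(len(qarg_run) - 1):
        chunk.append(qarg_run[i])
        if qarg_run[i] != qarg_run[i + 1]:
            partition.append(chunk)
            chunk = []
    chunk.append(qarg_run[-1])
    partition.append(chunk)
    # Even-length chunks cancel completely; odd-length chunks keep one gate.
    return [chunk[0] for chunk in partition if len(chunk) % 2 == 1]

print(cancel_cx_run([(0, 1), (0, 1), (1, 2), (1, 2), (1, 2)]))  # [(1, 2)]
```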
Return a single backend matching the specified filtering.
Args:
name (str): name of the backend.
**kwargs (dict): dict used for filtering.
Returns:
BaseBackend: a backend matching the filtering.
Raises:
QiskitBackendNotFoundError: if no backend could be found or
more than one backend matches. | def get_backend(self, name=None, **kwargs):
backends = self.backends(name, **kwargs)
if len(backends) > 1:
raise QiskitBackendNotFoundError('More than one backend matches the criteria')
elif not backends:
raise QiskitBackendNotFoundError('No backend matches the criteria')
return backends[0] | 159,939 |
The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Choi: the matrix power of the SuperOp converted to a Choi channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer. | def power(self, n):
if n > 0:
return super().power(n)
return Choi(SuperOp(self).power(n)) | 159,944 |
Evolve a quantum state by the QuantumChannel.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
DensityMatrix: the output quantum state as a density matrix.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions. | def _evolve(self, state, qargs=None):
# If subsystem evolution we use the SuperOp representation
if qargs is not None:
return SuperOp(self)._evolve(state, qargs)
# Otherwise we compute full evolution directly
state = self._format_state(state, density_matrix=True)
if state.shape[0] != self._input_dim:
raise QiskitError(
"QuantumChannel input dimension is not equal to state dimension."
)
return np.einsum('AB,AiBj->ij', state,
np.reshape(self._data, self._bipartite_shape)) | 159,945 |
Return the tensor product channel.
Args:
other (QuantumChannel): a quantum channel.
reverse (bool): If False return self ⊗ other,
if True return other ⊗ self [Default: False]
Returns:
Choi: the tensor product channel as a Choi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass. | def _tensor_product(self, other, reverse=False):
# Convert other to Choi
if not isinstance(other, Choi):
other = Choi(other)
if reverse:
input_dims = self.input_dims() + other.input_dims()
output_dims = self.output_dims() + other.output_dims()
data = _bipartite_tensor(
other.data,
self._data,
shape1=other._bipartite_shape,
shape2=self._bipartite_shape)
else:
input_dims = other.input_dims() + self.input_dims()
output_dims = other.output_dims() + self.output_dims()
data = _bipartite_tensor(
self._data,
other.data,
shape1=self._bipartite_shape,
shape2=other._bipartite_shape)
return Choi(data, input_dims, output_dims) | 159,946 |
Get the number and size of unique registers from bit_labels list.
Args:
bit_labels (list): this list is of the form::
[['reg1', 0], ['reg1', 1], ['reg2', 0]]
which indicates a register named "reg1" of size 2
and a register named "reg2" of size 1. This is the
format of classic and quantum bit labels in qobj
header.
Yields:
tuple: iterator of register_name:size pairs. | def _get_register_specs(bit_labels):
it = itertools.groupby(bit_labels, operator.itemgetter(0))
for register_name, sub_it in it:
yield register_name, max(ind[1] for ind in sub_it) + 1 | 159,947 |
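A usage sketch, assuming the generator above and its `itertools`/`operator` imports are in scope. Note that `groupby` only merges consecutive labels, which holds for qobj headers because bits are listed register by register.

```python
labels = [['q', 0], ['q', 1], ['q', 2], ['c', 0]]
print(list(_get_register_specs(labels)))  # [('q', 3), ('c', 1)]
```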
Truncate long floats
Args:
matchobj (re.Match): contains original float
format_str (str): format specifier
Returns:
str: returns truncated float | def _truncate_float(matchobj, format_str='0.2g'):
if matchobj.group(0):
return format(float(matchobj.group(0)), format_str)
return '' | 159,948 |
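A usage sketch of `_truncate_float` as an `re.sub` callback; the float-matching pattern below is illustrative, not necessarily the caller's.

```python
import re

text = 'error = 0.00123456 over 1.9999 shots'
print(re.sub(r'\d+\.\d+', _truncate_float, text))
# error = 0.0012 over 2 shots
```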
Get the index number for a quantum bit
Args:
qubit (tuple): The tuple of the bit of the form
(register_name, bit_number)
Returns:
int: The index in the bit list
Raises:
VisualizationError: If the bit isn't found | def _get_qubit_index(self, qubit):
for i, bit in enumerate(self.qubit_list):
if qubit == bit:
qindex = i
break
else:
raise exceptions.VisualizationError("unable to find bit for operation")
return qindex | 159,956 |
Loads the QObj schema for use in future validations.
Caches schema in _SCHEMAS module attribute.
Args:
file_path(str): Path to schema.
name(str): Given name for schema. Defaults to file_path filename
without schema.
Return:
schema(dict): Loaded schema. | def _load_schema(file_path, name=None):
if name is None:
# filename without extension
name = os.path.splitext(os.path.basename(file_path))[0]
if name not in _SCHEMAS:
with open(file_path, 'r') as schema_file:
_SCHEMAS[name] = json.load(schema_file)
return _SCHEMAS[name] | 159,957 |
Return a random quantum state from the uniform (Haar) measure on
state space.
Args:
dim (int): the dim of the state space
seed (int): Optional. To set a random seed.
Returns:
ndarray: state(dim), a random quantum state. | def random_state(dim, seed=None):
if seed is None:
seed = np.random.randint(0, np.iinfo(np.int32).max)
rng = np.random.RandomState(seed)
# Random array over interval (0, 1]
x = rng.rand(dim)
x += x == 0
x = -np.log(x)
sumx = sum(x)
phases = rng.rand(dim)*2.0*np.pi
return np.sqrt(x/sumx)*np.exp(1j*phases) | 159,972 |
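A quick sanity check, assuming `random_state` above is in scope: the sampled amplitudes are normalized, so the probabilities sum to 1.

```python
import numpy as np

psi = random_state(8, seed=42)
print(psi.shape)                                    # (8,)
print(bool(np.isclose(np.sum(np.abs(psi)**2), 1)))  # True
```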
Return a random dim x dim unitary Operator from the Haar measure.
Args:
dim (int): the dim of the state space.
seed (int): Optional. To set a random seed.
Returns:
Operator: (dim, dim) unitary operator.
Raises:
QiskitError: if dim is not a positive power of 2. | def random_unitary(dim, seed=None):
if dim == 0 or not math.log2(dim).is_integer():
raise QiskitError("Desired unitary dimension not a positive power of 2.")
matrix = np.zeros([dim, dim], dtype=complex)
for j in range(dim):
if j == 0:
a = random_state(dim, seed)
else:
a = random_state(dim)
matrix[:, j] = np.copy(a)
# Gram-Schmidt orthogonalize
i = j-1
while i >= 0:
dc = np.vdot(matrix[:, i], a)
matrix[:, j] = matrix[:, j]-dc*matrix[:, i]
i = i - 1
# normalize
matrix[:, j] = matrix[:, j] * (1.0 / np.sqrt(np.vdot(matrix[:, j], matrix[:, j])))
return Operator(matrix) | 159,973 |
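A quick sanity check, assuming `random_unitary` above is in scope (it returns a qiskit `Operator`, whose matrix is in `.data`):

```python
import numpy as np

u = random_unitary(4, seed=7).data
print(bool(np.allclose(u.dot(u.conj().T), np.eye(4))))  # True
```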
Return a normally distributed complex random matrix.
Args:
nrow (int): number of rows in output matrix.
ncol (int): number of columns in output matrix.
seed (int): Optional. To set a random seed.
Returns:
ndarray: A complex rectangular matrix where each real and imaginary
entry is sampled from the normal distribution. | def __ginibre_matrix(nrow, ncol=None, seed=None):
if ncol is None:
ncol = nrow
if seed is not None:
np.random.seed(seed)
G = np.random.normal(size=(nrow, ncol)) + \
np.random.normal(size=(nrow, ncol)) * 1j
return G | 159,975 |
Generate a random density matrix from the Hilbert-Schmidt metric.
Args:
N (int): the length of the density matrix.
rank (int or None): the rank of the density matrix. The default
value is full-rank.
seed (int): Optional. To set a random seed.
Returns:
ndarray: rho (N,N), a density matrix. | def __random_density_hs(N, rank=None, seed=None):
G = __ginibre_matrix(N, rank, seed)
G = G.dot(G.conj().T)
return G / np.trace(G) | 159,976 |
Generate a random density matrix from the Bures metric.
Args:
N (int): the length of the density matrix.
rank (int or None): the rank of the density matrix. The default
value is full-rank.
seed (int): Optional. To set a random seed.
Returns:
ndarray: rho (N,N) a density matrix. | def __random_density_bures(N, rank=None, seed=None):
P = np.eye(N) + random_unitary(N).data
G = P.dot(__ginibre_matrix(N, rank, seed))
G = G.dot(G.conj().T)
return G / np.trace(G) | 159,977 |
The matrix power of the channel.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
Chi: the matrix power of the SuperOp converted to a Chi channel.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer. | def power(self, n):
if n > 0:
return super().power(n)
return Chi(SuperOp(self).power(n)) | 159,985 |
Return the QuantumChannel self + other.
Args:
other (QuantumChannel): a quantum channel.
Returns:
Chi: the linear addition self + other as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass, or
has incompatible dimensions. | def add(self, other):
if not isinstance(other, Chi):
other = Chi(other)
if self.dim != other.dim:
raise QiskitError("other QuantumChannel dimensions are not equal")
return Chi(self._data + other.data, self._input_dims,
self._output_dims) | 159,986 |
Return the QuantumChannel other * self.
Args:
other (complex): a complex number.
Returns:
Chi: the scalar multiplication other * self as a Chi object.
Raises:
QiskitError: if other is not a valid scalar. | def multiply(self, other):
if not isinstance(other, Number):
raise QiskitError("other is not a number")
return Chi(other * self._data, self._input_dims, self._output_dims) | 159,987 |
Return the tensor product channel.
Args:
other (QuantumChannel): a quantum channel.
reverse (bool): If False return self ⊗ other,
if True return other ⊗ self [Default: False]
Returns:
Chi: the tensor product channel as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass. | def _tensor_product(self, other, reverse=False):
if not isinstance(other, Chi):
other = Chi(other)
if reverse:
input_dims = self.input_dims() + other.input_dims()
output_dims = self.output_dims() + other.output_dims()
data = np.kron(other.data, self._data)
else:
input_dims = other.input_dims() + self.input_dims()
output_dims = other.output_dims() + self.output_dims()
data = np.kron(self._data, other.data)
return Chi(data, input_dims, output_dims) | 159,988 |
Return the compose of a QuantumChannel with itself n times.
Args:
n (int): compute the matrix power of the superoperator matrix.
Returns:
SuperOp: the n-times composition channel as a SuperOp object.
Raises:
QiskitError: if the input and output dimensions of the
QuantumChannel are not equal, or the power is not an integer. | def power(self, n):
if not isinstance(n, (int, np.integer)):
raise QiskitError("Can only power with integer powers.")
if self._input_dim != self._output_dim:
raise QiskitError("Can only power with input_dim = output_dim.")
# Override base class power so we can implement more efficiently
# using Numpy.matrix_power
return SuperOp(
np.linalg.matrix_power(self._data, n), self.input_dims(),
self.output_dims()) | 159,993 |
Return the QuantumChannel self + other.
Args:
other (QuantumChannel): a quantum channel.
Returns:
SuperOp: the linear addition self + other as a SuperOp object.
Raises:
QiskitError: if other cannot be converted to a channel or
has incompatible dimensions. | def add(self, other):
# Convert other to SuperOp
if not isinstance(other, SuperOp):
other = SuperOp(other)
if self.dim != other.dim:
raise QiskitError("other QuantumChannel dimensions are not equal")
return SuperOp(self._data + other.data, self.input_dims(),
self.output_dims()) | 159,994 |
Return the QuantumChannel other * self.
Args:
other (complex): a complex number.
Returns:
SuperOp: the scalar multiplication other * self as a SuperOp object.
Raises:
QiskitError: if other is not a valid scalar. | def multiply(self, other):
if not isinstance(other, Number):
raise QiskitError("other is not a number")
return SuperOp(other * self._data, self.input_dims(),
self.output_dims()) | 159,995 |
Evolve a quantum state by the QuantumChannel.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
DensityMatrix: the output quantum state as a density matrix.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions. | def _evolve(self, state, qargs=None):
state = self._format_state(state, density_matrix=True)
if qargs is None:
if state.shape[0] != self._input_dim:
raise QiskitError(
"QuantumChannel input dimension is not equal to state dimension."
)
shape_in = self._input_dim * self._input_dim
shape_out = (self._output_dim, self._output_dim)
# Return evolved density matrix
return np.reshape(
np.dot(self._data, np.reshape(state, shape_in, order='F')),
shape_out,
order='F')
# Subsystem evolution
return self._evolve_subsystem(state, qargs) | 159,996 |
Evolve a quantum state by the operator.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
specified QuantumState subsystem dimensions. | def _evolve_subsystem(self, state, qargs):
mat = np.reshape(self.data, self._shape)
# Hack to assume state is a N-qubit state until a proper class for states
# is in place
state_size = len(state)
state_dims = self._automatic_dims(None, state_size)
if self.input_dims() != len(qargs) * (2, ):
raise QiskitError(
"Channel input dimensions are not compatible with state subsystem dimensions."
)
# Return evolved density matrix
tensor = np.reshape(state, 2 * state_dims)
num_indices = len(state_dims)
indices = [num_indices - 1 - qubit for qubit in qargs
] + [2 * num_indices - 1 - qubit for qubit in qargs]
tensor = self._einsum_matmul(tensor, mat, indices)
return np.reshape(tensor, [state_size, state_size]) | 159,998 |
Expand 3+ qubit gates using their decomposition rules.
Args:
dag(DAGCircuit): input dag
Returns:
DAGCircuit: output dag with maximum node degrees of 2
Raises:
QiskitError: if a 3q+ gate is not decomposable | def run(self, dag):
for node in dag.threeQ_or_more_gates():
# TODO: allow choosing other possible decompositions
rule = node.op.definition
if not rule:
raise QiskitError("Cannot unroll all 3q or more gates. "
"No rule to expand instruction %s." %
node.op.name)
# hacky way to build a dag on the same register as the rule is defined
# TODO: need anonymous rules to address wires by index
decomposition = DAGCircuit()
decomposition.add_qreg(rule[0][1][0][0])
for inst in rule:
decomposition.apply_operation_back(*inst)
decomposition = self.run(decomposition) # recursively unroll
dag.substitute_node_with_dag(node, decomposition)
return dag | 160,004 |
Expand a given gate into its decomposition.
Args:
dag(DAGCircuit): input dag
Returns:
DAGCircuit: output dag where gate was expanded. | def run(self, dag):
# Walk through the DAG and expand each non-basis node
for node in dag.op_nodes(self.gate):
# opaque or built-in gates are not decomposable
if not node.op.definition:
continue
# TODO: allow choosing among multiple decomposition rules
rule = node.op.definition
# hacky way to build a dag on the same register as the rule is defined
# TODO: need anonymous rules to address wires by index
decomposition = DAGCircuit()
decomposition.add_qreg(rule[0][1][0][0])
if rule[0][2]:
decomposition.add_creg(rule[0][2][0][0])
for inst in rule:
decomposition.apply_operation_back(*inst)
dag.substitute_node_with_dag(node, decomposition)
return dag | 160,005 |
Create a gate from a numeric unitary matrix.
Args:
data (matrix or Operator): unitary operator.
label (str): unitary name for backend [Default: None].
Raises:
ExtensionError: if input data is not an N-qubit unitary operator. | def __init__(self, data, label=None):
if hasattr(data, 'to_matrix'):
# If input is Gate subclass or some other class object that has
# a to_matrix method this will call that method.
data = data.to_matrix()
elif hasattr(data, 'to_operator'):
# If input is a BaseOperator subclass this attempts to convert
# the object to an Operator so that we can extract the underlying
# numpy matrix from `Operator.data`.
data = data.to_operator().data
# Convert to numpy array in case not already an array
data = numpy.array(data, dtype=complex)
# Check input is unitary
if not is_unitary_matrix(data):
raise ExtensionError("Input matrix is not unitary.")
# Check input is N-qubit matrix
input_dim, output_dim = data.shape
n_qubits = int(numpy.log2(input_dim))
if input_dim != output_dim or 2**n_qubits != input_dim:
raise ExtensionError(
"Input matrix is not an N-qubit operator.")
# Store instruction params
super().__init__('unitary', n_qubits, [data], label=label) | 160,007 |
Return the compose of a operator with itself n times.
Args:
n (int): the number of times to compose with self (n>0).
Returns:
BaseOperator: the n-times composed operator.
Raises:
QiskitError: if the input and output dimensions of the operator
are not equal, or the power is not a positive integer. | def power(self, n):
# NOTE: if a subclass can have negative or non-integer powers
# this method should be overridden in that class.
if not isinstance(n, (int, np.integer)) or n < 1:
raise QiskitError("Can only power with positive integer powers.")
if self._input_dim != self._output_dim:
raise QiskitError("Can only power with input_dim = output_dim.")
ret = self.copy()
for _ in range(1, n):
ret = ret.compose(self)
return ret | 160,022 |
Apply real scalar function to singular values of a matrix.
Args:
a (array_like): (N, N) Matrix at which to evaluate the function.
func (callable): Callable object that evaluates a scalar function f.
Returns:
ndarray: funm (N, N) Value of the matrix function specified by func
evaluated at `A`. | def _funm_svd(a, func):
U, s, Vh = la.svd(a, lapack_driver='gesvd')
S = np.diag(func(s))
return U.dot(S).dot(Vh) | 160,035 |
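A usage sketch, assuming `_funm_svd` above is in scope: for a positive semidefinite matrix the SVD coincides with the eigendecomposition, so applying `np.sqrt` to the singular values yields a matrix square root.

```python
import numpy as np

b = np.random.rand(3, 3)
a = b.dot(b.T)                  # symmetric positive semidefinite
root = _funm_svd(a, np.sqrt)
print(bool(np.allclose(root.dot(root), a)))  # True
```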
If `dag` is mapped to `coupling_map`, the property
`is_swap_mapped` is set to True (or to False otherwise).
Args:
dag (DAGCircuit): DAG to map. | def run(self, dag):
if self.layout is None:
if self.property_set["layout"]:
self.layout = self.property_set["layout"]
else:
self.layout = Layout.generate_trivial_layout(*dag.qregs.values())
self.property_set['is_swap_mapped'] = True
for gate in dag.twoQ_gates():
physical_q0 = self.layout[gate.qargs[0]]
physical_q1 = self.layout[gate.qargs[1]]
if self.coupling_map.distance(physical_q0, physical_q1) != 1:
self.property_set['is_swap_mapped'] = False
return | 160,036 |
Initializes the asynchronous job.
Args:
backend (BaseBackend): the backend used to run the job.
job_id (str): a unique id in the context of the backend used to run
the job. | def __init__(self, backend, job_id):
self._job_id = job_id
self._backend = backend | 160,037 |
Set snapshot label to name
Args:
name (str or None): label to assign unitary
Raises:
TypeError: name is not string or None. | def label(self, name):
if isinstance(name, str):
self._label = name
else:
raise TypeError('label expects a string') | 160,042 |
If `dag` is mapped and the direction is correct the property
`is_direction_mapped` is set to True (or to False otherwise).
Args:
dag (DAGCircuit): DAG to check. | def run(self, dag):
if self.layout is None:
if self.property_set["layout"]:
self.layout = self.property_set["layout"]
else:
self.layout = Layout.generate_trivial_layout(*dag.qregs.values())
self.property_set['is_direction_mapped'] = True
edges = self.coupling_map.get_edges()
for gate in dag.twoQ_gates():
physical_q0 = self.layout[gate.qargs[0]]
physical_q1 = self.layout[gate.qargs[1]]
if isinstance(gate.op, (CXBase, CnotGate)) and (
physical_q0, physical_q1) not in edges:
self.property_set['is_direction_mapped'] = False
return | 160,053 |
Create an ApplicationProfile instance.
Args:
mackup (Mackup)
files (set)
dry_run (bool)
verbose (bool) | def __init__(self, mackup, files, dry_run, verbose):
assert isinstance(mackup, Mackup)
assert isinstance(files, set)
self.mackup = mackup
self.files = list(files)
self.dry_run = dry_run
self.verbose = verbose | 160,090 |
Get home and mackup filepaths for given file
Args:
filename (str)
Returns:
home_filepath, mackup_filepath (str, str) | def getFilepaths(self, filename):
return (os.path.join(os.environ['HOME'], filename),
os.path.join(self.mackup.mackup_folder, filename)) | 160,091 |
Ask the user if they really want something to happen.
Args:
question(str): What can happen
Returns:
(boolean): Confirmed or not | def confirm(question):
if FORCE_YES:
return True
while True:
answer = input(question + ' <Yes|No>').lower()
if answer == 'yes' or answer == 'y':
confirmed = True
break
if answer == 'no' or answer == 'n':
confirmed = False
break
return confirmed | 160,104 |
Delete the given file, directory or link.
It should support undelete later on.
Args:
filepath (str): Absolute full path to a file. e.g. /path/to/file | def delete(filepath):
# Some files have ACLs, let's remove them recursively
remove_acl(filepath)
# Some files have immutable attributes, let's remove them recursively
remove_immutable_attribute(filepath)
# Finally remove the files and folders
if os.path.isfile(filepath) or os.path.islink(filepath):
os.remove(filepath)
elif os.path.isdir(filepath):
shutil.rmtree(filepath) | 160,105 |
Create a link to a target file or a folder.
For simplicity's sake, both target and link_to must be absolute paths and must
include the filename of the file or folder.
Also do not include any trailing slash.
e.g. link('/path/to/file', '/path/to/link')
But not: link('/path/to/file', 'path/to/')
or link('/path/to/folder/', '/path/to/link')
Args:
target (str): file or folder the link will point to
link_to (str): Link to create | def link(target, link_to):
assert isinstance(target, str)
assert os.path.exists(target)
assert isinstance(link_to, str)
# Create the path to the link if it does not exist
abs_path = os.path.dirname(os.path.abspath(link_to))
if not os.path.isdir(abs_path):
os.makedirs(abs_path)
# Make sure the file or folder recursively has the good mode
chmod(target)
# Create the link to target
os.symlink(target, link_to) | 160,107 |
Recursively set the chmod for files to 0600 and 0700 for folders.
It's ok unless we need something more specific.
Args:
target (str): Root file or folder | def chmod(target):
assert isinstance(target, str)
assert os.path.exists(target)
file_mode = stat.S_IRUSR | stat.S_IWUSR
folder_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
# Remove the immutable attribute recursively if there is one
remove_immutable_attribute(target)
if os.path.isfile(target):
os.chmod(target, file_mode)
elif os.path.isdir(target):
# chmod the root item
os.chmod(target, folder_mode)
# chmod recursively in the folder if it's one
for root, dirs, files in os.walk(target):
for cur_dir in dirs:
os.chmod(os.path.join(root, cur_dir), folder_mode)
for cur_file in files:
os.chmod(os.path.join(root, cur_file), file_mode)
else:
raise ValueError("Unsupported file type: {}".format(target)) | 160,108 |
Throw an error with the given message and immediately quit.
Args:
message(str): The message to display. | def error(message):
fail = '\033[91m'
end = '\033[0m'
sys.exit(fail + "Error: {}".format(message) + end) | 160,109 |
Check if a process with the given name is running.
Args:
process_name (str): Process name, e.g. "Sublime Text"
Returns:
(bool): True if the process is running | def is_process_running(process_name):
is_running = False
# On systems with pgrep, check if the given process is running
if os.path.isfile('/usr/bin/pgrep'):
dev_null = open(os.devnull, 'wb')
returncode = subprocess.call(['/usr/bin/pgrep', process_name],
stdout=dev_null)
is_running = bool(returncode == 0)
return is_running | 160,115 |
Remove the ACL of the file or folder located on the given path.
Also remove the ACL of any file and folder below the given one,
recursively.
Args:
path (str): Path to the file or folder to remove the ACL for,
recursively. | def remove_acl(path):
# Some files have ACLs, let's remove them recursively
if (platform.system() == constants.PLATFORM_DARWIN and
os.path.isfile('/bin/chmod')):
subprocess.call(['/bin/chmod', '-R', '-N', path])
elif ((platform.system() == constants.PLATFORM_LINUX) and
os.path.isfile('/bin/setfacl')):
subprocess.call(['/bin/setfacl', '-R', '-b', path]) | 160,116 |
Create a Config instance.
Args:
filename (str): Optional filename of the config file. If empty,
defaults to MACKUP_CONFIG_FILE | def __init__(self, filename=None):
assert isinstance(filename, str) or filename is None
# Initialize the parser
self._parser = self._setup_parser(filename)
# Do we have an old config file ?
self._warn_on_old_config()
# Get the storage engine
self._engine = self._parse_engine()
# Get the path where the Mackup folder is
self._path = self._parse_path()
# Get the directory replacing 'Mackup', if any
self._directory = self._parse_directory()
# Get the list of apps to ignore
self._apps_to_ignore = self._parse_apps_to_ignore()
# Get the list of apps to allow
self._apps_to_sync = self._parse_apps_to_sync() | 160,118 |
Configure the ConfigParser instance the way we want it.
Args:
filename (str) or None
Returns:
SafeConfigParser | def _setup_parser(self, filename=None):
assert isinstance(filename, str) or filename is None
# If we are not overriding the config filename
if not filename:
filename = MACKUP_CONFIG_FILE
parser = configparser.SafeConfigParser(allow_no_value=True)
parser.read(os.path.join(os.path.join(os.environ['HOME'], filename)))
return parser | 160,120 |
Build tabula-py option from template file
Args:
path_or_buffer: File-like object or path of Tabula app template
Returns:
`obj`:dict: tabula-py options | def load_template(path_or_buffer):
from itertools import groupby
from operator import itemgetter
path_or_buffer = _stringify_path(path_or_buffer)
if is_file_like(path_or_buffer):
templates = json.load(path_or_buffer)
else:
with open(path_or_buffer, 'r') as f:
templates = json.load(f)
options = []
grouper = itemgetter('page', 'extraction_method')
for key, grp in groupby(sorted(templates, key=grouper), grouper):
tmp_options = [_convert_template_option(e) for e in grp]
if len(tmp_options) == 1:
options.append(tmp_options[0])
continue
option = tmp_options[0]
areas = [e.get('area') for e in tmp_options]
option['area'] = areas
option['multiple_tables'] = True
options.append(option)
return options | 160,128 |
Convert Tabula app template to tabula-py option
Args:
template (dict): Tabula app template
Returns:
`obj`:dict: tabula-py option | def _convert_template_option(template):
option = {}
extraction_method = template.get('extraction_method')
if extraction_method == 'guess':
option['guess'] = True
elif extraction_method == 'lattice':
option['lattice'] = True
elif extraction_method == 'stream':
option['stream'] = True
option['pages'] = template.get('page')
option['area'] = [round(template['y1'], 3), round(template['x1'], 3), round(template['y2'], 3), round(template['x2'], 3)]
return option | 160,129 |
Convert tables from PDFs in a directory.
Args:
input_dir (str):
Directory path.
output_format (str, optional):
Output format of this function (csv, json or tsv)
java_options (list, optional):
Set java options like `-Xmx256m`.
kwargs (dict):
Dictionary of option for tabula-java. Details are shown in `build_options()`
Returns:
Nothing. Outputs are saved into the same directory as `input_dir` | def convert_into_by_batch(input_dir, output_format='csv', java_options=None, **kwargs):
if input_dir is None or not os.path.isdir(input_dir):
raise AttributeError("'input_dir' should be a directory path")
kwargs['format'] = _extract_format_for_conversion(output_format)
if java_options is None:
java_options = []
elif isinstance(java_options, str):
java_options = shlex.split(java_options)
# Option for batch
kwargs['batch'] = input_dir
_run(java_options, kwargs) | 160,137 |
Extract tables from json.
Args:
raw_json (list):
Decoded list from tabula-java JSON.
pandas_options (dict, optional):
pandas options for `pd.DataFrame()` | def _extract_from(raw_json, pandas_options=None):
data_frames = []
if pandas_options is None:
pandas_options = {}
columns = pandas_options.pop('columns', None)
columns, header_line_number = _convert_pandas_csv_options(pandas_options, columns)
for table in raw_json:
list_data = [[np.nan if not e['text'] else e['text'] for e in row] for row in table['data']]
_columns = columns
if isinstance(header_line_number, int) and not columns:
_columns = list_data.pop(header_line_number)
_columns = ['' if e is np.nan else e for e in _columns]
data_frames.append(pd.DataFrame(data=list_data, columns=_columns, **pandas_options))
return data_frames | 160,138 |
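A sketch of the JSON shape this expects, modelled on tabula-java's output (real rows carry extra keys such as coordinates, but only 'text' is consulted here):

raw_json = [{
    'data': [
        [{'text': 'name'}, {'text': 'qty'}],
        [{'text': 'apple'}, {'text': '3'}],
        [{'text': ''}, {'text': '5'}],  # empty text becomes NaN
    ]
}]
dfs = _extract_from(raw_json, pandas_options={'header': 0})
# dfs[0] has columns ['name', 'qty']; the last row holds NaN and '5'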
Translate `pd.read_csv()` options into `pd.DataFrame()` options, with special handling for the header.
Args:
pandas_options (dict):
pandas options like {'header': None}.
columns (list):
list of column names. | def _convert_pandas_csv_options(pandas_options, columns):
_columns = pandas_options.pop('names', columns)
header = pandas_options.pop('header', None)
pandas_options.pop('encoding', None)
if header == 'infer':
header_line_number = 0 if not bool(_columns) else None
else:
header_line_number = header
return _columns, header_line_number | 160,139 |
Ensure the target file is available locally.
If the target file is remote, this function fetches it into local storage.
Args:
path_or_buffer (str):
File path, file-like object, or URL of the target file.
Returns:
filename (str): file name in local storage
temporary_file_flag (bool): temporary file flag | def localize_file(path_or_buffer):
path_or_buffer = _stringify_path(path_or_buffer)
if _is_url(path_or_buffer):
req = urlopen(path_or_buffer)
filename = os.path.basename(req.geturl())
if os.path.splitext(filename)[-1] != ".pdf":
pid = os.getpid()
filename = "{0}.pdf".format(pid)
with open(filename, 'wb') as f:
shutil.copyfileobj(req, f)
return filename, True
elif is_file_like(path_or_buffer):
pid = os.getpid()
filename = "{0}.pdf".format(pid)
with open(filename, 'wb') as f:
shutil.copyfileobj(path_or_buffer, f)
return filename, True
# File path case
else:
return os.path.expanduser(path_or_buffer), False | 160,141 |
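A usage sketch covering the three accepted inputs (URL and file names are placeholders); temporary files are the caller's responsibility to remove:

filename, is_temp = localize_file('~/docs/report.pdf')            # plain path
# filename, is_temp = localize_file('https://example.com/a.pdf')  # URL: downloaded
# filename, is_temp = localize_file(open('report.pdf', 'rb'))     # file-like: copied
if is_temp:
    os.remove(filename)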
Convert path like object to string
Args:
path_or_buffer: object to be converted
Returns:
string_path_or_buffer: maybe string version of path_or_buffer | def _stringify_path(path_or_buffer):
try:
import pathlib
_PATHLIB_INSTALLED = True
except ImportError:
_PATHLIB_INSTALLED = False
if hasattr(path_or_buffer, '__fspath__'):
return path_or_buffer.__fspath__()
if _PATHLIB_INSTALLED and isinstance(path_or_buffer, pathlib.Path):
return text_type(path_or_buffer)
return path_or_buffer | 160,142 |
Loads the contents of the file specified by path
Args:
path (string): The relative or absolute path to the file to
be loaded. If the path is relative, then it is combined
with the base_path to generate a full path string
Returns:
string: The contents of the file as a string
Raises:
ConfigurationError: If the file cannot be loaded | def get_contents(self, path):
try:
if not os.path.exists(path):
raise ConfigurationError('specified path does not exist %s' % path)
with open(path) as f:
data = f.read()
return data
except (IOError, OSError) as exc:
raise ConfigurationError('error trying to load file contents: %s' % exc) | 160,980 |
Transform the path to an absolute path
Args:
path (string): The path to transform to an absolute path
Returns:
string: The absolute path to the file | def abspath(self, path):
# Expand '~' before joining; joining first would hide the leading
# '~' from expanduser and leave the path unresolved
if path.startswith('~'):
path = os.path.expanduser(path)
if not path.startswith(os.path.sep):
path = os.path.join(self.base_path, path)
return path | 160,981 |
Inspects the object and reports whether it is a playbook
Args:
obj (object): The object to be inspected by this function
Returns:
boolean: True if the object is a list-like iterable (not a string or mapping), False otherwise | def isplaybook(obj):
return isinstance(obj, Iterable) and (not isinstance(obj, string_types) and not isinstance(obj, Mapping)) | 161,013 |
Construct a PDF image from an Image XObject inside a PDF
``pim = PdfImage(page.Resources.XObject['/ImageNN'])``
Args:
obj (pikepdf.Object): an Image XObject | def __init__(self, obj):
if isinstance(obj, Stream) and obj.stream_dict.get("/Subtype") != "/Image":
raise TypeError("can't construct PdfImage from non-image")
self.obj = obj | 161,053 |
Attempt to extract the image directly to a usable image file
If there is no way to extract the image without decompressing or
transcoding then raise an exception. The type and format of image
generated will vary.
Args:
stream: Writable stream to write data to | def _extract_direct(self, *, stream):
def normal_dct_rgb():
# Normal DCTDecode RGB images have the default value of
# /ColorTransform 1 and are actually in YUV. Such a file can be
# saved as a standard JPEG. RGB JPEGs without YUV conversion can't
# be saved as JPEGs, and are probably bugs. Some software in the
# wild actually produces RGB JPEGs in PDFs (probably a bug).
DEFAULT_CT_RGB = 1
ct = self.filter_decodeparms[0][1].get('/ColorTransform', DEFAULT_CT_RGB)
return self.mode == 'RGB' and ct == DEFAULT_CT_RGB
def normal_dct_cmyk():
# Normal DCTDecode CMYKs have /ColorTransform 0 and can be saved.
# There is a YUVK colorspace but CMYK JPEGs don't generally use it
DEFAULT_CT_CMYK = 0
ct = self.filter_decodeparms[0][1].get('/ColorTransform', DEFAULT_CT_CMYK)
return self.mode == 'CMYK' and ct == DEFAULT_CT_CMYK
if self.filters == ['/CCITTFaxDecode']:
data = self.obj.read_raw_bytes()
stream.write(self._generate_ccitt_header(data))
stream.write(data)
return '.tif'
elif self.filters == ['/DCTDecode'] and (
self.mode == 'L' or normal_dct_rgb() or normal_dct_cmyk()
):
buffer = self.obj.get_raw_stream_buffer()
stream.write(buffer)
return '.jpg'
raise UnsupportedImageTypeError() | 161,056 |
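A pikepdf usage sketch (file names are illustrative): take the first image XObject on a page and let extract_to pick the direct path when one of the cases above applies:

import pikepdf
from pikepdf import PdfImage

with pikepdf.open('input.pdf') as pdf:
    page = pdf.pages[0]
    raw_image = next(iter(page.images.values()))  # first /Image XObject
    pim = PdfImage(raw_image)
    out_name = pim.extract_to(fileprefix='preview')  # e.g. 'preview.jpg' or 'preview.tif'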
Set up and return a filter stack.
Args:
stack: :class:`~sqlparse.filters.FilterStack` instance
options: Dictionary with options validated by validate_options. | def build_filter_stack(stack, options):
# Token filter
if options.get('keyword_case'):
stack.preprocess.append(
filters.KeywordCaseFilter(options['keyword_case']))
if options.get('identifier_case'):
stack.preprocess.append(
filters.IdentifierCaseFilter(options['identifier_case']))
if options.get('truncate_strings'):
stack.preprocess.append(filters.TruncateStringFilter(
width=options['truncate_strings'], char=options['truncate_char']))
if options.get('use_space_around_operators', False):
stack.enable_grouping()
stack.stmtprocess.append(filters.SpacesAroundOperatorsFilter())
# After grouping
if options.get('strip_comments'):
stack.enable_grouping()
stack.stmtprocess.append(filters.StripCommentsFilter())
if options.get('strip_whitespace') or options.get('reindent'):
stack.enable_grouping()
stack.stmtprocess.append(filters.StripWhitespaceFilter())
if options.get('reindent'):
stack.enable_grouping()
stack.stmtprocess.append(
filters.ReindentFilter(
char=options['indent_char'],
width=options['indent_width'],
indent_after_first=options['indent_after_first'],
indent_columns=options['indent_columns'],
wrap_after=options['wrap_after'],
comma_first=options['comma_first']))
if options.get('reindent_aligned', False):
stack.enable_grouping()
stack.stmtprocess.append(
filters.AlignedIndentFilter(char=options['indent_char']))
if options.get('right_margin'):
stack.enable_grouping()
stack.stmtprocess.append(
filters.RightMarginFilter(width=options['right_margin']))
# Serializer
if options.get('output_format'):
frmt = options['output_format']
if frmt.lower() == 'php':
fltr = filters.OutputPHPFilter()
elif frmt.lower() == 'python':
fltr = filters.OutputPythonFilter()
else:
fltr = None
if fltr is not None:
stack.postprocess.append(fltr)
return stack | 161,135 |
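These options normally arrive via sqlparse.format(), which validates them before calling this builder; a quick sketch:

import sqlparse

sql = "select id, name from users where active = 1"
print(sqlparse.format(sql, keyword_case='upper', reindent=True))
# SELECT id,
#        name
# FROM users
# WHERE active = 1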
Manage GitlabHttpError exceptions.
This decorator function can be used to catch GitlabHttpError exceptions
and raise specialized exceptions instead.
Args:
error(Exception): The exception type to raise -- must inherit from
GitlabError | def on_http_error(error):
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except GitlabHttpError as e:
raise error(e.error_message, e.response_code, e.response_body)
return wrapped_f
return wrap | 163,339 |
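A typical application inside python-gitlab, shown here as a simplified sketch of a manager method (the body is illustrative, not the library's exact code):

@on_http_error(GitlabCreateError)
def create(self, data, **kwargs):
    # A GitlabHttpError raised below surfaces as GitlabCreateError
    return self.gitlab.http_post(self.path, post_data=data, **kwargs)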
Creates an objects list from a GitlabList.
You should not create objects of this type directly; use the managers' list()
methods instead.
Args:
manager: the RESTManager to attach to the objects
obj_cls: the class of the created objects
_list: the GitlabList holding the data | def __init__(self, manager, obj_cls, _list):
self.manager = manager
self._obj_cls = obj_cls
self._list = _list | 163,362 |
REST manager constructor.
Args:
gl (Gitlab): :class:`~gitlab.Gitlab` connection to use to make
requests.
parent: REST object to which the manager is attached. | def __init__(self, gl, parent=None):
self.gitlab = gl
self._parent = parent # for nested managers
self._computed_path = self._compute_path() | 163,364 |
Block the user.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabBlockError: If the user could not be blocked
Returns:
bool: Whether the user status has been changed | def block(self, **kwargs):
path = '/users/%s/block' % self.id
server_data = self.manager.gitlab.http_post(path, **kwargs)
if server_data is True:
self._attrs['state'] = 'blocked'
return server_data | 163,367 |
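An end-to-end usage sketch (URL, token, and user ID are placeholders):

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='TOKEN')
user = gl.users.get(42)
if user.block():
    print(user.state)  # 'blocked'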
Update an object on the server.
Args:
id: ID of the object to update (can be None if not required)
new_data: the update data for the object
**kwargs: Extra options to send to the server (e.g. sudo)
Returns:
dict: The new object data (*not* a RESTObject)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabUpdateError: If the server cannot perform the request | def update(self, id=None, new_data=None, **kwargs):
new_data = new_data if new_data is not None else {}
data = new_data.copy()
if 'domain_whitelist' in data and data['domain_whitelist'] is None:
data.pop('domain_whitelist')
super(ApplicationSettingsManager, self).update(id, data, **kwargs) | 163,368 |
Transfer a project to this group.
Args:
to_project_id (int): ID of the project to transfer
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTransferProjectError: If the project could not be transferred | def transfer_project(self, to_project_id, **kwargs):
path = '/groups/%s/projects/%s' % (self.id, to_project_id)
self.manager.gitlab.http_post(path, **kwargs) | 163,374 |
Add an LDAP group link.
Args:
cn (str): CN of the LDAP group
group_access (int): Minimum access level for members of the LDAP
group
provider (str): LDAP provider for the LDAP group
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request | def add_ldap_group_link(self, cn, group_access, provider, **kwargs):
path = '/groups/%s/ldap_group_links' % self.get_id()
data = {'cn': cn, 'group_access': group_access, 'provider': provider}
self.manager.gitlab.http_post(path, post_data=data, **kwargs) | 163,375 |
Delete an LDAP group link.
Args:
cn (str): CN of the LDAP group
provider (str): LDAP provider for the LDAP group
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request | def delete_ldap_group_link(self, cn, provider=None, **kwargs):
path = '/groups/%s/ldap_group_links' % self.get_id()
if provider is not None:
path += '/%s' % provider
path += '/%s' % cn
self.manager.gitlab.http_delete(path, **kwargs) | 163,376 |
Sync LDAP groups.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request | def ldap_sync(self, **kwargs):
path = '/groups/%s/ldap_sync' % self.get_id()
self.manager.gitlab.http_post(path, **kwargs) | 163,377 |
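A sketch tying the three LDAP helpers together, reusing the gl connection from the sketch above (CN, provider, and group path are placeholders; DEVELOPER_ACCESS is python-gitlab's access-level constant):

import gitlab

group = gl.groups.get('my-group')
group.add_ldap_group_link('cn=devs,ou=groups,dc=example,dc=com',
                          gitlab.const.DEVELOPER_ACCESS, 'ldapmain')
group.ldap_sync()  # trigger a sync so the new link takes effect
group.delete_ldap_group_link('cn=devs,ou=groups,dc=example,dc=com',
                             provider='ldapmain')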
Unprotect the branch.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabProtectError: If the branch could not be unprotected | def unprotect(self, **kwargs):
id = self.get_id().replace('/', '%2F')
path = '%s/%s/unprotect' % (self.manager.path, id)
self.manager.gitlab.http_put(path, **kwargs)
self._attrs['protected'] = False | 163,380 |
Cancel the job.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabJobCancelError: If the job could not be canceled | def cancel(self, **kwargs):
path = '%s/%s/cancel' % (self.manager.path, self.get_id())
self.manager.gitlab.http_post(path, **kwargs) | 163,381 |
Retry the job.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabJobRetryError: If the job could not be retried | def retry(self, **kwargs):
path = '%s/%s/retry' % (self.manager.path, self.get_id())
self.manager.gitlab.http_post(path, **kwargs) | 163,382 |
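A sketch of the job lifecycle calls (project path and job ID are placeholders, gl as in the sketches above):

project = gl.projects.get('group/project')
job = project.jobs.get(1234)
if job.status == 'failed':
    job.retry()
elif job.status == 'running':
    job.cancel()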