def imflip(img, direction='horizontal'):
    """Flip an image horizontally or vertically.

    Args:
        img (ndarray): Image to be flipped.
        direction (str): The flip direction, either "horizontal" or
            "vertical".

    Returns:
        ndarray: The flipped image.
    """
    assert direction in ['horizontal', 'vertical']
    if direction == 'horizontal':
        return np.flip(img, axis=1)
    else:
        return np.flip(img, axis=0)

def bbox_clip(bboxes, img_shape):
    """Clip bboxes to fit the image shape.

    Args:
        bboxes (ndarray): Shape (..., 4*k).
        img_shape (tuple): (height, width) of the image.

    Returns:
        ndarray: Clipped bboxes.
    """
    assert bboxes.shape[-1] % 4 == 0
    clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype)
    clipped_bboxes[..., 0::2] = np.maximum(
        np.minimum(bboxes[..., 0::2], img_shape[1] - 1), 0)
    clipped_bboxes[..., 1::2] = np.maximum(
        np.minimum(bboxes[..., 1::2], img_shape[0] - 1), 0)
    return clipped_bboxes

def bbox_scaling(bboxes, scale, clip_shape=None):
    """Scale bboxes with respect to the box center.

    Args:
        bboxes (ndarray): Shape (..., 4).
        scale (float): Scaling factor.
        clip_shape (tuple, optional): If specified, bboxes that exceed the
            boundary will be clipped according to the given shape (h, w).

    Returns:
        ndarray: Scaled bboxes.
    """
    if float(scale) == 1.0:
        scaled_bboxes = bboxes.copy()
    else:
        w = bboxes[..., 2] - bboxes[..., 0] + 1
        h = bboxes[..., 3] - bboxes[..., 1] + 1
        dw = (w * (scale - 1)) * 0.5
        dh = (h * (scale - 1)) * 0.5
        scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1)
    if clip_shape is not None:
        return bbox_clip(scaled_bboxes, clip_shape)
    else:
        return scaled_bboxes

def imcrop(img, bboxes, scale=1.0, pad_fill=None):
    """Crop image patches.

    3 steps: scale the bboxes -> clip bboxes -> crop and pad.

    Args:
        img (ndarray): Image to be cropped.
        bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
        scale (float, optional): Scale ratio of bboxes, the default value
            1.0 means no scaling.
        pad_fill (number or list): Value to be filled for padding, None for
            no padding.

    Returns:
        list or ndarray: The cropped image patches.
    """
    chn = 1 if img.ndim == 2 else img.shape[2]
    if pad_fill is not None:
        if isinstance(pad_fill, (int, float)):
            pad_fill = [pad_fill for _ in range(chn)]
        assert len(pad_fill) == chn

    _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
    scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
    clipped_bbox = bbox_clip(scaled_bboxes, img.shape)

    patches = []
    for i in range(clipped_bbox.shape[0]):
        x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
        if pad_fill is None:
            patch = img[y1:y2 + 1, x1:x2 + 1, ...]
        else:
            _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
            if chn == 1:  # a 2-dim image has a single channel
                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
            else:
                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
            patch = np.array(
                pad_fill, dtype=img.dtype) * np.ones(
                    patch_shape, dtype=img.dtype)
            x_start = 0 if _x1 >= 0 else -_x1
            y_start = 0 if _y1 >= 0 else -_y1
            w = x2 - x1 + 1
            h = y2 - y1 + 1
            patch[y_start:y_start + h,
                  x_start:x_start + w, ...] = img[y1:y1 + h, x1:x1 + w, ...]
        patches.append(patch)

    if bboxes.ndim == 1:
        return patches[0]
    else:
        return patches

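A minimal usage sketch for imcrop, assuming numpy is imported as np and the functions above are available; the image and box values are made up for illustration:

img = np.random.randint(0, 256, (100, 120, 3), dtype=np.uint8)
bbox = np.array([10, 10, 49, 59])      # x1, y1, x2, y2 (inclusive)
patch = imcrop(img, bbox)              # shape (50, 40, 3)
# Enlarge the box 1.5x around its center; regions that fall outside the
# image are filled with a constant gray value.
padded = imcrop(img, bbox, scale=1.5, pad_fill=128)
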
def impad(img, shape, pad_val=0):
    """Pad an image to a certain shape.

    Args:
        img (ndarray): Image to be padded.
        shape (tuple): Expected padding shape.
        pad_val (number or sequence): Values to be filled in padding areas.

    Returns:
        ndarray: The padded image.
    """
    if not isinstance(pad_val, (int, float)):
        assert len(pad_val) == img.shape[-1]
    if len(shape) < len(img.shape):
        shape = shape + (img.shape[-1], )
    assert len(shape) == len(img.shape)
    for i in range(len(shape) - 1):
        assert shape[i] >= img.shape[i]
    pad = np.empty(shape, dtype=img.dtype)
    pad[...] = pad_val
    pad[:img.shape[0], :img.shape[1], ...] = img
    return pad

def impad_to_multiple(img, divisor, pad_val=0):
    """Pad an image so that each edge is a multiple of some number.

    Args:
        img (ndarray): Image to be padded.
        divisor (int): Padded image edges will be multiples of the divisor.
        pad_val (number or sequence): Same as :func:`impad`.

    Returns:
        ndarray: The padded image.
    """
    pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor
    pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor
    return impad(img, (pad_h, pad_w), pad_val)

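Padding to a multiple is common before feeding images to networks with stride constraints; a quick sketch (shapes are illustrative, numpy assumed as np):

img = np.zeros((37, 50, 3), dtype=np.uint8)
padded = impad_to_multiple(img, 32)
print(padded.shape)  # (64, 64, 3): both edges rounded up to multiples of 32
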
def _scale_size(size, scale):
    """Rescale a size by a ratio.

    Args:
        size (tuple): (w, h).
        scale (float): Scaling factor.

    Returns:
        tuple[int]: Scaled size.
    """
    w, h = size
    return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)

def imresize(img, size, return_scale=False, interpolation='bilinear'):
    """Resize image to a given size.

    Args:
        img (ndarray): The input image.
        size (tuple): Target (w, h).
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Interpolation method, accepted values are
            "nearest", "bilinear", "bicubic", "area", "lanczos".

    Returns:
        tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
            `resized_img`.
    """
    h, w = img.shape[:2]
    resized_img = cv2.resize(
        img, size, interpolation=interp_codes[interpolation])
    if not return_scale:
        return resized_img
    else:
        w_scale = size[0] / w
        h_scale = size[1] / h
        return resized_img, w_scale, h_scale

def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'):
    """Resize image to the same size of a given image.

    Args:
        img (ndarray): The input image.
        dst_img (ndarray): The target image.
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Same as :func:`resize`.

    Returns:
        tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
            `resized_img`.
    """
    h, w = dst_img.shape[:2]
    return imresize(img, (w, h), return_scale, interpolation)

def _register_handler(handler, file_formats):
    """Register a handler for some file extensions.

    Args:
        handler (:obj:`BaseFileHandler`): Handler to be registered.
        file_formats (str or list[str]): File formats to be handled by
            this handler.
    """
    if not isinstance(handler, BaseFileHandler):
        raise TypeError(
            'handler must be a child of BaseFileHandler, not {}'.format(
                type(handler)))
    if isinstance(file_formats, str):
        file_formats = [file_formats]
    if not is_list_of(file_formats, str):
        raise TypeError('file_formats must be a str or a list of str')
    for ext in file_formats:
        file_handlers[ext] = handler

def get_priority(priority):
    """Get priority value.

    Args:
        priority (int or str or :obj:`Priority`): Priority.

    Returns:
        int: The priority value.
    """
    if isinstance(priority, int):
        if priority < 0 or priority > 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    elif isinstance(priority, Priority):
        return priority.value
    elif isinstance(priority, str):
        return Priority[priority.upper()].value
    else:
        raise TypeError('priority must be an integer or Priority enum value')

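The three accepted spellings are interchangeable; a sketch, assuming a Priority enum in which NORMAL maps to 50 (the exact numeric values depend on the enum definition, which is not shown here):

assert get_priority(50) == 50
assert get_priority('normal') == get_priority('NORMAL')  # case-insensitive
assert get_priority(Priority.NORMAL) == Priority.NORMAL.value
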
def quantize(arr, min_val, max_val, levels, dtype=np.int64):
    """Quantize an array of (-inf, inf) to [0, levels-1].

    Args:
        arr (ndarray): Input array.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels.
        dtype (np.type): The type of the quantized array.

    Returns:
        ndarray: Quantized array.
    """
    if not (isinstance(levels, int) and levels > 1):
        raise ValueError(
            'levels must be a positive integer, but got {}'.format(levels))
    if min_val >= max_val:
        raise ValueError(
            'min_val ({}) must be smaller than max_val ({})'.format(
                min_val, max_val))

    arr = np.clip(arr, min_val, max_val) - min_val
    quantized_arr = np.minimum(
        np.floor(levels * arr / (max_val - min_val)).astype(dtype),
        levels - 1)
    return quantized_arr

def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
    """Dequantize an array.

    Args:
        arr (ndarray): Input array.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels.
        dtype (np.type): The type of the dequantized array.

    Returns:
        ndarray: Dequantized array.
    """
    if not (isinstance(levels, int) and levels > 1):
        raise ValueError(
            'levels must be a positive integer, but got {}'.format(levels))
    if min_val >= max_val:
        raise ValueError(
            'min_val ({}) must be smaller than max_val ({})'.format(
                min_val, max_val))

    dequantized_arr = (arr + 0.5).astype(dtype) * (
        max_val - min_val) / levels + min_val
    return dequantized_arr

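quantize and dequantize are approximate inverses: dequantize maps each level back to the center of its bin, so the round-trip error is at most half a bin width. A sketch, assuming numpy as np:

x = np.linspace(-1, 1, 5)          # [-1, -0.5, 0, 0.5, 1]
q = quantize(x, -1, 1, 4)          # 4 levels, bins of width 0.5
x_rec = dequantize(q, -1, 1, 4)    # bin centers
assert np.all(np.abs(x_rec - x) <= 0.25 + 1e-9)  # half a bin
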
def imshow(img, win_name='', wait_time=0):
    """Show an image.

    Args:
        img (str or ndarray): The image to be displayed.
        win_name (str): The window name.
        wait_time (int): Value of waitKey param.
    """
    cv2.imshow(win_name, imread(img))
    cv2.waitKey(wait_time)

def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs):
    """Read an optical flow map.

    Args:
        flow_or_path (ndarray or str): A flow map or filepath.
        quantize (bool): Whether to read a quantized pair. If set to True,
            remaining args will be passed to :func:`dequantize_flow`.
        concat_axis (int): The axis that dx and dy are concatenated,
            can be either 0 or 1. Ignored if quantize is False.

    Returns:
        ndarray: Optical flow represented as a (h, w, 2) numpy array.
    """
    if isinstance(flow_or_path, np.ndarray):
        if (flow_or_path.ndim != 3) or (flow_or_path.shape[-1] != 2):
            raise ValueError('Invalid flow with shape {}'.format(
                flow_or_path.shape))
        return flow_or_path
    elif not is_str(flow_or_path):
        raise TypeError(
            '"flow_or_path" must be a filename or numpy array, not {}'.format(
                type(flow_or_path)))

    if not quantize:
        with open(flow_or_path, 'rb') as f:
            try:
                header = f.read(4).decode('utf-8')
            except Exception:
                raise IOError('Invalid flow file: {}'.format(flow_or_path))
            else:
                if header != 'PIEH':
                    raise IOError(
                        'Invalid flow file: {}, header does not contain PIEH'.
                        format(flow_or_path))

            w = np.fromfile(f, np.int32, 1).squeeze()
            h = np.fromfile(f, np.int32, 1).squeeze()
            flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2))
    else:
        assert concat_axis in [0, 1]
        cat_flow = imread(flow_or_path, flag='unchanged')
        if cat_flow.ndim != 2:
            raise IOError(
                '{} is not a valid quantized flow file, its dimension is {}.'.
                format(flow_or_path, cat_flow.ndim))
        assert cat_flow.shape[concat_axis] % 2 == 0
        dx, dy = np.split(cat_flow, 2, axis=concat_axis)
        flow = dequantize_flow(dx, dy, *args, **kwargs)

    return flow.astype(np.float32)

def quantize_flow(flow, max_val=0.02, norm=True):
    """Quantize flow to [0, 255].

    After this step, the size of flow will be much smaller, and can be
    dumped as jpeg images.

    Args:
        flow (ndarray): (h, w, 2) array of optical flow.
        max_val (float): Maximum value of flow, values beyond
            [-max_val, max_val] will be truncated.
        norm (bool): Whether to divide flow values by image width/height.

    Returns:
        tuple[ndarray]: Quantized dx and dy.
    """
    h, w, _ = flow.shape
    dx = flow[..., 0]
    dy = flow[..., 1]
    if norm:
        dx = dx / w  # avoid inplace operations
        dy = dy / h
    # use 255 levels instead of 256 to make sure 0 is 0 after dequantization
    flow_comps = [
        quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy]
    ]
    return tuple(flow_comps)

def dequantize_flow(dx, dy, max_val=0.02, denorm=True):
    """Recover from quantized flow.

    Args:
        dx (ndarray): Quantized dx.
        dy (ndarray): Quantized dy.
        max_val (float): Maximum value used when quantizing.
        denorm (bool): Whether to multiply flow values with width/height.

    Returns:
        ndarray: Dequantized flow.
    """
    assert dx.shape == dy.shape
    assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)

    dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]]

    if denorm:
        dx *= dx.shape[1]
        dy *= dy.shape[0]
    flow = np.dstack((dx, dy))
    return flow

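A round-trip sketch for the flow quantization pair (values are illustrative; quantization to 255 levels is lossy, so only approximate recovery is expected):

flow = np.random.uniform(-0.01, 0.01, (8, 8, 2)).astype(np.float32)
dx, dy = quantize_flow(flow, max_val=0.02, norm=False)
rec = dequantize_flow(dx, dy, max_val=0.02, denorm=False)
# 255 levels over [-0.02, 0.02]: error is bounded by half a level
assert np.max(np.abs(rec - flow)) < 0.04 / 255
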
def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Either a filepath or URL or modelzoo://xxxxxxx.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to strictly enforce that the keys in the
            checkpoint match the keys of the model's state_dict.
        logger (:mod:`logging.Logger` or None): The logger for error
            messages.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    # load checkpoint from modelzoo or file or url
    if filename.startswith('modelzoo://'):
        import torchvision
        model_urls = dict()
        for _, name, ispkg in pkgutil.walk_packages(
                torchvision.models.__path__):
            if not ispkg:
                _zoo = import_module('torchvision.models.{}'.format(name))
                _urls = getattr(_zoo, 'model_urls')
                model_urls.update(_urls)
        model_name = filename[11:]
        checkpoint = model_zoo.load_url(model_urls[model_name])
    elif filename.startswith('open-mmlab://'):
        model_name = filename[13:]
        checkpoint = model_zoo.load_url(open_mmlab_model_urls[model_name])
    elif filename.startswith(('http://', 'https://')):
        checkpoint = model_zoo.load_url(filename)
    else:
        if not osp.isfile(filename):
            raise IOError('{} is not a checkpoint file'.format(filename))
        checkpoint = torch.load(filename, map_location=map_location)
    # get state_dict from checkpoint
    if isinstance(checkpoint, OrderedDict):
        state_dict = checkpoint
    elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        raise RuntimeError(
            'No state_dict found in checkpoint file {}'.format(filename))
    # strip the "module." prefix left by DataParallel, if present
    if list(state_dict.keys())[0].startswith('module.'):
        state_dict = {k[7:]: v for k, v in state_dict.items()}
    # load state_dict
    if hasattr(model, 'module'):
        load_state_dict(model.module, state_dict, strict, logger)
    else:
        load_state_dict(model, state_dict, strict, logger)
    return checkpoint

def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on CPU.
    """
    state_dict_cpu = OrderedDict()
    for key, val in state_dict.items():
        state_dict_cpu[key] = val.cpu()
    return state_dict_cpu

def save_checkpoint(model, filename, optimizer=None, meta=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError('meta must be a dict or None, but got {}'.format(
            type(meta)))
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())

    mmcv.mkdir_or_exist(osp.dirname(filename))
    if hasattr(model, 'module'):
        model = model.module
    checkpoint = {
        'meta': meta,
        'state_dict': weights_to_cpu(model.state_dict())
    }
    if optimizer is not None:
        checkpoint['optimizer'] = optimizer.state_dict()
    torch.save(checkpoint, filename)

def init_optimizer(self, optimizer):
    """Init the optimizer.

    Args:
        optimizer (dict or :obj:`~torch.optim.Optimizer`): Either an
            optimizer object or a dict used for constructing the optimizer.

    Returns:
        :obj:`~torch.optim.Optimizer`: An optimizer object.

    Examples:
        >>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
        >>> type(runner.init_optimizer(optimizer))
        <class 'torch.optim.sgd.SGD'>
    """
    if isinstance(optimizer, dict):
        optimizer = obj_from_dict(
            optimizer, torch.optim, dict(params=self.model.parameters()))
    elif not isinstance(optimizer, torch.optim.Optimizer):
        raise TypeError(
            'optimizer must be either an Optimizer object or a dict, '
            'but got {}'.format(type(optimizer)))
    return optimizer

def init_logger(self, log_dir=None, level=logging.INFO):
    """Init the logger.

    Args:
        log_dir (str, optional): Log file directory. If not specified, no
            log file will be used.
        level (int or str): See the built-in python logging module.

    Returns:
        :obj:`~logging.Logger`: Python logger.
    """
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s', level=level)
    logger = logging.getLogger(__name__)
    if log_dir and self.rank == 0:
        filename = '{}.log'.format(self.timestamp)
        log_file = osp.join(log_dir, filename)
        self._add_file_handler(logger, log_file, level=level)
    return logger

def register_hook(self, hook, priority='NORMAL'):
    """Register a hook into the hook list.

    Args:
        hook (:obj:`Hook`): The hook to be registered.
        priority (int or str or :obj:`Priority`): Hook priority. Lower
            value means higher priority.
    """
    assert isinstance(hook, Hook)
    if hasattr(hook, 'priority'):
        raise ValueError('"priority" is a reserved attribute for hooks')
    priority = get_priority(priority)
    hook.priority = priority
    # insert the hook to a sorted list
    inserted = False
    for i in range(len(self._hooks) - 1, -1, -1):
        if priority >= self._hooks[i].priority:
            self._hooks.insert(i + 1, hook)
            inserted = True
            break
    if not inserted:
        self._hooks.insert(0, hook)

def run(self, data_loaders, workflow, max_epochs, **kwargs):
    """Start running.

    Args:
        data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
            and validation.
        workflow (list[tuple]): A list of (phase, epochs) to specify the
            running order and epochs. E.g., [('train', 2), ('val', 1)]
            means running 2 epochs for training and 1 epoch for
            validation, iteratively.
        max_epochs (int): Total training epochs.
    """
    assert isinstance(data_loaders, list)
    assert mmcv.is_list_of(workflow, tuple)
    assert len(data_loaders) == len(workflow)

    self._max_epochs = max_epochs
    work_dir = self.work_dir if self.work_dir is not None else 'NONE'
    self.logger.info('Start running, host: %s, work_dir: %s',
                     get_host_info(), work_dir)
    self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs)
    self.call_hook('before_run')

    while self.epoch < max_epochs:
        for i, flow in enumerate(workflow):
            mode, epochs = flow
            if isinstance(mode, str):  # e.g. self.train()
                if not hasattr(self, mode):
                    raise ValueError(
                        'runner has no method named "{}" to run an epoch'.
                        format(mode))
                epoch_runner = getattr(self, mode)
            elif callable(mode):  # custom train()
                epoch_runner = mode
            else:
                raise TypeError('mode in workflow must be a str or '
                                'callable function, not {}'.format(
                                    type(mode)))
            for _ in range(epochs):
                if mode == 'train' and self.epoch >= max_epochs:
                    return
                epoch_runner(data_loaders[i], **kwargs)

    time.sleep(1)  # wait for some hooks like loggers to finish
    self.call_hook('after_run')

def resize_video(in_file,
                 out_file,
                 size=None,
                 ratio=None,
                 keep_ar=False,
                 log_level='info',
                 print_cmd=False,
                 **kwargs):
    """Resize a video.

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        size (tuple): Expected size (w, h), e.g., (320, 240) or (320, -1).
        ratio (tuple or float): Expected resize ratio, (2, 0.5) means
            (w*2, h*0.5).
        keep_ar (bool): Whether to keep original aspect ratio.
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    if size is None and ratio is None:
        raise ValueError('expected size or ratio must be specified')
    elif size is not None and ratio is not None:
        raise ValueError(
            'size and ratio cannot be specified at the same time')
    options = {'log_level': log_level}
    if size:
        if not keep_ar:
            options['vf'] = 'scale={}:{}'.format(size[0], size[1])
        else:
            options['vf'] = ('scale=w={}:h={}:force_original_aspect_ratio'
                             '=decrease'.format(size[0], size[1]))
    else:
        if not isinstance(ratio, tuple):
            ratio = (ratio, ratio)
        options['vf'] = 'scale="trunc(iw*{}):trunc(ih*{})"'.format(
            ratio[0], ratio[1])
    convert_video(in_file, out_file, print_cmd, **options)

def cut_video(in_file,
              out_file,
              start=None,
              end=None,
              vcodec=None,
              acodec=None,
              log_level='info',
              print_cmd=False,
              **kwargs):
    """Cut a clip from a video.

    Args:
        in_file (str): Input video filename.
        out_file (str): Output video filename.
        start (None or float): Start time (in seconds).
        end (None or float): End time (in seconds).
        vcodec (None or str): Output video codec, None for unchanged.
        acodec (None or str): Output audio codec, None for unchanged.
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    options = {'log_level': log_level}
    if vcodec is None:
        options['vcodec'] = 'copy'
    if acodec is None:
        options['acodec'] = 'copy'
    if start:
        options['ss'] = start
    else:
        start = 0
    if end:
        options['t'] = end - start
    convert_video(in_file, out_file, print_cmd, **options)

def concat_video(video_list,
                 out_file,
                 vcodec=None,
                 acodec=None,
                 log_level='info',
                 print_cmd=False,
                 **kwargs):
    """Concatenate multiple videos into a single one.

    Args:
        video_list (list): A list of video filenames.
        out_file (str): Output video filename.
        vcodec (None or str): Output video codec, None for unchanged.
        acodec (None or str): Output audio codec, None for unchanged.
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    _, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True)
    with open(tmp_filename, 'w') as f:
        for filename in video_list:
            f.write('file {}\n'.format(osp.abspath(filename)))
    options = {'log_level': log_level}
    if vcodec is None:
        options['vcodec'] = 'copy'
    if acodec is None:
        options['acodec'] = 'copy'
    convert_video(
        tmp_filename,
        out_file,
        print_cmd,
        pre_options='-f concat -safe 0',
        **options)
    os.remove(tmp_filename)

def list_from_file(filename, prefix='', offset=0, max_num=0):
    """Load a text file and parse the content as a list of strings.

    Args:
        filename (str): Filename.
        prefix (str): The prefix to be inserted at the beginning of each
            item.
        offset (int): The offset of lines.
        max_num (int): The maximum number of lines to be read; zeros and
            negatives mean no limitation.

    Returns:
        list[str]: A list of strings.
    """
    cnt = 0
    item_list = []
    with open(filename, 'r') as f:
        for _ in range(offset):
            f.readline()
        for line in f:
            if max_num > 0 and cnt >= max_num:
                break
            item_list.append(prefix + line.rstrip('\n'))
            cnt += 1
    return item_list

def dict_from_file(filename, key_type=str):
    """Load a text file and parse the content as a dict.

    Each line of the text file will be two or more columns split by
    whitespaces or tabs. The first column will be parsed as dict keys, and
    the following columns will be parsed as dict values.

    Args:
        filename (str): Filename.
        key_type (type): Type of the dict's keys. str is used by default
            and type conversion will be performed if specified.

    Returns:
        dict: The parsed contents.
    """
    mapping = {}
    with open(filename, 'r') as f:
        for line in f:
            items = line.rstrip('\n').split()
            assert len(items) >= 2
            key = key_type(items[0])
            val = items[1:] if len(items) > 2 else items[1]
            mapping[key] = val
    return mapping

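A small sketch of the expected file format (the file name and contents are hypothetical). Note that a line with exactly two columns maps to a scalar value while more columns map to a list:

# classes.txt:
#   1 person
#   2 car truck
labels = dict_from_file('classes.txt', key_type=int)
# {1: 'person', 2: ['car', 'truck']}
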
def imread(img_or_path, flag='color'):
    """Read an image.

    Args:
        img_or_path (ndarray or str): Either a numpy array or image path.
            If it is a numpy array (loaded image), then it will be
            returned as is.
        flag (str): Flags specifying the color type of a loaded image,
            candidates are `color`, `grayscale` and `unchanged`.

    Returns:
        ndarray: Loaded image array.
    """
    if isinstance(img_or_path, np.ndarray):
        return img_or_path
    elif is_str(img_or_path):
        flag = imread_flags[flag] if is_str(flag) else flag
        check_file_exist(img_or_path,
                         'img file does not exist: {}'.format(img_or_path))
        return cv2.imread(img_or_path, flag)
    else:
        raise TypeError('"img" must be a numpy array or a filename')

def imfrombytes(content, flag='color'):
    """Read an image from bytes.

    Args:
        content (bytes): Image bytes got from files or other streams.
        flag (str): Same as :func:`imread`.

    Returns:
        ndarray: Loaded image array.
    """
    img_np = np.frombuffer(content, np.uint8)
    flag = imread_flags[flag] if is_str(flag) else flag
    img = cv2.imdecode(img_np, flag)
    return img

def imwrite(img, file_path, params=None, auto_mkdir=True):
    """Write image to file.

    Args:
        img (ndarray): Image array to be written.
        file_path (str): Image file path.
        params (None or list): Same as opencv's :func:`imwrite` interface.
        auto_mkdir (bool): If the parent folder of `file_path` does not
            exist, whether to create it automatically.

    Returns:
        bool: Successful or not.
    """
    if auto_mkdir:
        dir_name = osp.abspath(osp.dirname(file_path))
        mkdir_or_exist(dir_name)
    return cv2.imwrite(file_path, img, params)

def bgr2gray(img, keepdim=False):
    """Convert a BGR image to grayscale image.

    Args:
        img (ndarray): The input image.
        keepdim (bool): If False (by default), then return the grayscale
            image with 2 dims, otherwise 3 dims.

    Returns:
        ndarray: The converted grayscale image.
    """
    out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if keepdim:
        out_img = out_img[..., None]
    return out_img

def gray2bgr(img):
    """Convert a grayscale image to BGR image.

    Args:
        img (ndarray): The input image.

    Returns:
        ndarray: The converted BGR image.
    """
    img = img[..., None] if img.ndim == 2 else img
    out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    return out_img

def iter_cast(inputs, dst_type, return_type=None):
    """Cast elements of an iterable object into some type.

    Args:
        inputs (Iterable): The input object.
        dst_type (type): Destination type.
        return_type (type, optional): If specified, the output object will
            be converted to this type, otherwise an iterator.

    Returns:
        iterator or specified type: The converted object.
    """
    if not isinstance(inputs, collections_abc.Iterable):
        raise TypeError('inputs must be an iterable object')
    if not isinstance(dst_type, type):
        raise TypeError('"dst_type" must be a valid type')

    out_iterable = six.moves.map(dst_type, inputs)

    if return_type is None:
        return out_iterable
    else:
        return return_type(out_iterable)

def is_seq_of(seq, expected_type, seq_type=None):
    """Check whether it is a sequence of some type.

    Args:
        seq (Sequence): The sequence to be checked.
        expected_type (type): Expected type of sequence items.
        seq_type (type, optional): Expected sequence type.

    Returns:
        bool: Whether the sequence is valid.
    """
    if seq_type is None:
        exp_seq_type = collections_abc.Sequence
    else:
        assert isinstance(seq_type, type)
        exp_seq_type = seq_type
    if not isinstance(seq, exp_seq_type):
        return False
    for item in seq:
        if not isinstance(item, expected_type):
            return False
    return True

def slice_list(in_list, lens):
    """Slice a list into several sub lists by a list of given lengths.

    Args:
        in_list (list): The list to be sliced.
        lens (list): The expected length of each output sub list.

    Returns:
        list: A list of sliced lists.
    """
    if not isinstance(lens, list):
        raise TypeError('"lens" must be a list of integers')
    elif sum(lens) != len(in_list):
        raise ValueError(
            'sum of lens and list length does not match: {} != {}'.format(
                sum(lens), len(in_list)))
    out_list = []
    idx = 0
    for i in range(len(lens)):
        out_list.append(in_list[idx:idx + lens[i]])
        idx += lens[i]
    return out_list

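A quick usage sketch:

chunks = slice_list([1, 2, 3, 4, 5, 6], [2, 1, 3])
# [[1, 2], [3], [4, 5, 6]]
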
def check_prerequisites(
        prerequisites,
        checker,
        msg_tmpl='Prerequisites "{}" are required in method "{}" but not '
        'found, please install them first.'):
    """A decorator factory to check if prerequisites are satisfied.

    Args:
        prerequisites (str or list[str]): Prerequisites to be checked.
        checker (callable): The checker method that returns True if a
            prerequisite is met, False otherwise.
        msg_tmpl (str): The message template with two variables.

    Returns:
        decorator: A specific decorator.
    """

    def wrap(func):

        @functools.wraps(func)
        def wrapped_func(*args, **kwargs):
            requirements = [prerequisites] if isinstance(
                prerequisites, str) else prerequisites
            missing = []
            for item in requirements:
                if not checker(item):
                    missing.append(item)
            if missing:
                print(msg_tmpl.format(', '.join(missing), func.__name__))
                raise RuntimeError('Prerequisites not met.')
            else:
                return func(*args, **kwargs)

        return wrapped_func

    return wrap

def color_val(color):
    """Convert various input to color tuples.

    Args:
        color (:obj:`Color`/str/tuple/int/ndarray): Color inputs.

    Returns:
        tuple[int]: A tuple of 3 integers indicating BGR channels.
    """
    if is_str(color):
        return Color[color].value
    elif isinstance(color, Color):
        return color.value
    elif isinstance(color, tuple):
        assert len(color) == 3
        for channel in color:
            assert channel >= 0 and channel <= 255
        return color
    elif isinstance(color, int):
        assert color >= 0 and color <= 255
        return color, color, color
    elif isinstance(color, np.ndarray):
        assert color.ndim == 1 and color.size == 3
        assert np.all((color >= 0) & (color <= 255))
        color = color.astype(np.uint8)
        return tuple(color)
    else:
        raise TypeError('Invalid type for color: {}'.format(type(color)))

def check_time(timer_id):
    """Add check points in a single line.

    This method is suitable for running a task on a list of items. A timer
    will be registered when the method is called for the first time.

    :Example:

    >>> import time
    >>> import mmcv
    >>> for i in range(1, 6):
    >>>     # simulate a code block
    >>>     time.sleep(i)
    >>>     mmcv.check_time('task1')
    2.000
    3.000
    4.000
    5.000

    Args:
        timer_id (str): Timer identifier.
    """
    if timer_id not in _g_timers:
        _g_timers[timer_id] = Timer()
        return 0
    else:
        return _g_timers[timer_id].since_last_check()

def flowshow(flow, win_name='', wait_time=0):
    """Show optical flow.

    Args:
        flow (ndarray or str): The optical flow to be displayed.
        win_name (str): The window name.
        wait_time (int): Value of waitKey param.
    """
    flow = flowread(flow)
    flow_img = flow2rgb(flow)
    imshow(rgb2bgr(flow_img), win_name, wait_time)

def flow2rgb(flow, color_wheel=None, unknown_thr=1e6):
    """Convert flow map to RGB image.

    Args:
        flow (ndarray): Array of optical flow.
        color_wheel (ndarray or None): Color wheel used to map flow field
            to RGB colorspace. Default color wheel will be used if not
            specified.
        unknown_thr (float): Values above this threshold will be marked as
            unknown and thus ignored.

    Returns:
        ndarray: RGB image that can be visualized.
    """
    assert flow.ndim == 3 and flow.shape[-1] == 2
    if color_wheel is None:
        color_wheel = make_color_wheel()
    assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3
    num_bins = color_wheel.shape[0]

    dx = flow[:, :, 0].copy()
    dy = flow[:, :, 1].copy()

    ignore_inds = (np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) |
                   (np.abs(dy) > unknown_thr))
    dx[ignore_inds] = 0
    dy[ignore_inds] = 0

    rad = np.sqrt(dx**2 + dy**2)
    if np.any(rad > np.finfo(float).eps):
        max_rad = np.max(rad)
        dx /= max_rad
        dy /= max_rad

    [h, w] = dx.shape

    rad = np.sqrt(dx**2 + dy**2)
    angle = np.arctan2(-dy, -dx) / np.pi

    bin_real = (angle + 1) / 2 * (num_bins - 1)
    bin_left = np.floor(bin_real).astype(int)
    bin_right = (bin_left + 1) % num_bins
    w = (bin_real - bin_left.astype(np.float32))[..., None]
    flow_img = (
        1 - w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]
    small_ind = rad <= 1
    flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])
    flow_img[np.logical_not(small_ind)] *= 0.75

    flow_img[ignore_inds, :] = 0

    return flow_img

def make_color_wheel(bins=None):
    """Build a color wheel.

    Args:
        bins (list or tuple, optional): Specify the number of bins for each
            color range, corresponding to six ranges: red -> yellow,
            yellow -> green, green -> cyan, cyan -> blue, blue -> magenta,
            magenta -> red. [15, 6, 4, 11, 13, 6] is used by default
            (see Middlebury).

    Returns:
        ndarray: Color wheel of shape (total_bins, 3).
    """
    if bins is None:
        bins = [15, 6, 4, 11, 13, 6]
    assert len(bins) == 6

    RY, YG, GC, CB, BM, MR = tuple(bins)

    ry = [1, np.arange(RY) / RY, 0]
    yg = [1 - np.arange(YG) / YG, 1, 0]
    gc = [0, 1, np.arange(GC) / GC]
    cb = [0, 1 - np.arange(CB) / CB, 1]
    bm = [np.arange(BM) / BM, 0, 1]
    mr = [1, 0, 1 - np.arange(MR) / MR]

    num_bins = RY + YG + GC + CB + BM + MR

    color_wheel = np.zeros((3, num_bins), dtype=np.float32)

    col = 0
    for i, color in enumerate([ry, yg, gc, cb, bm, mr]):
        for j in range(3):
            color_wheel[j, col:col + bins[i]] = color[j]
        col += bins[i]

    return color_wheel.T

def validate_probability(p: float, p_str: str) -> float:
    """Validates that a probability is between 0 and 1 inclusively.

    Args:
        p: The value to validate.
        p_str: What to call the probability in error messages.

    Returns:
        The probability p if the probability is valid.

    Raises:
        ValueError if the probability is invalid.
    """
    if p < 0:
        raise ValueError('{} was less than 0.'.format(p_str))
    elif p > 1:
        raise ValueError('{} was greater than 1.'.format(p_str))
    return p

def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
    """Returns a list of operations that apply this gate to each of the
    targets.

    Args:
        *targets: The qubits to apply this gate to.

    Returns:
        Operations applying this gate to the target qubits.

    Raises:
        ValueError if targets are not instances of Qid.
    """
    return [self.on(target) for target in targets]

def __init__(self,
             *,  # Forces keyword args.
             picos: Union[int, float] = 0,
             nanos: Union[int, float] = 0) -> None:
    """Initializes a Timestamp with a time specified in ns and/or ps.

    The time is relative to some unspecified "time zero". If both picos
    and nanos are specified, their contributions away from zero are added.

    Args:
        picos: How many picoseconds away from time zero?
        nanos: How many nanoseconds away from time zero?
    """
    if picos and nanos:
        self._picos = picos + nanos * 1000
    else:
        # Try to preserve type information.
        self._picos = nanos * 1000 if nanos else picos

def plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray:
    """Plot the state histogram from a single result with repetitions.

    States is a bitstring representation of all the qubit states in a
    single result. Currently this function assumes each measurement gate
    applies to only a single qubit.

    Args:
        result: The trial results to plot.

    Returns:
        The histogram. A list of values plotted on the y-axis.
    """
    # pyplot import is deferred because it requires a system dependency
    # (python3-tk) that `python -m pip install cirq` can't handle for the
    # user. This allows cirq to be usable without python3-tk.
    import matplotlib.pyplot as plt

    num_qubits = len(result.measurements.keys())
    states = 2**num_qubits
    values = np.zeros(states)

    # measurements is a dict of {measurement gate key:
    #     array(repetitions, boolean result)}
    # Convert this to an array of repetitions, each with an array of
    # booleans.
    # e.g. {q1: array([[True, True]]), q2: array([[False, False]])}
    #      --> array([[True, False], [True, False]])
    measurement_by_result = np.array([
        v.transpose()[0] for k, v in result.measurements.items()]).transpose()

    for meas in measurement_by_result:
        # Convert each array of booleans to a string representation.
        # e.g. [True, False] -> [1, 0] -> '10' -> 2
        state_ind = int(''.join([str(x) for x in [int(x) for x in meas]]), 2)
        values[state_ind] += 1

    plot_labels = [bin(x)[2:].zfill(num_qubits) for x in range(states)]
    plt.bar(np.arange(states), values, tick_label=plot_labels)
    plt.xlabel('qubit state')
    plt.ylabel('result count')
    plt.show()

    return values

def __init__(self, value: Union[int, float], period: Union[int, float]):
    """Initializes the equivalence class.

    Args:
        value: numerical value to wrap.
        period: periodicity of the numerical value.
    """
    self.value = value % period
    self.period = period

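Because the value is reduced modulo the period on construction, values that differ by a whole number of periods land in the same equivalence class. A sketch, assuming the constructor above belongs to a class named PeriodicValue:

a = PeriodicValue(0.25, 2)
b = PeriodicValue(4.25, 2)
assert a.value == b.value == 0.25
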
def __init__(self,
             no_decomp: Callable[[ops.Operation], bool] = (lambda _: False)
             ) -> None:
    """Construct the optimization pass.

    Args:
        no_decomp: A predicate that determines whether an operation should
            be decomposed or not. Defaults to decomposing everything.
    """
    super().__init__()
    self.no_decomp = no_decomp

def run_sweep(
        self,
        program: Union[circuits.Circuit, schedules.Schedule],
        params: study.Sweepable,
        repetitions: int = 1,
) -> List[study.TrialResult]:
    """Runs the supplied Circuit or Schedule, mimicking quantum hardware.

    In contrast to run, this allows for sweeping over different parameter
    values.

    Args:
        program: The circuit or schedule to simulate.
        params: Parameters to run with the program.
        repetitions: The number of repetitions to simulate.

    Returns:
        TrialResult list for this run; one for each possible parameter
        resolver.
    """
    circuit = (program if isinstance(program, circuits.Circuit)
               else program.to_circuit())
    param_resolvers = study.to_resolvers(params)

    trial_results = []  # type: List[study.TrialResult]
    for param_resolver in param_resolvers:
        measurements = self._run(circuit=circuit,
                                 param_resolver=param_resolver,
                                 repetitions=repetitions)
        trial_results.append(study.TrialResult(params=param_resolver,
                                               repetitions=repetitions,
                                               measurements=measurements))
    return trial_results

def compute_samples_displays(
        self,
        program: Union[circuits.Circuit, schedules.Schedule],
        param_resolver: 'study.ParamResolverOrSimilarType' = None,
) -> study.ComputeDisplaysResult:
    """Computes SamplesDisplays in the supplied Circuit or Schedule.

    Args:
        program: The circuit or schedule to simulate.
        param_resolver: Parameters to run with the program.

    Returns:
        ComputeDisplaysResult for the simulation.
    """
    return self.compute_samples_displays_sweep(
        program, study.ParamResolver(param_resolver))[0]

def compute_samples_displays_sweep(
        self,
        program: Union[circuits.Circuit, schedules.Schedule],
        params: Optional[study.Sweepable] = None
) -> List[study.ComputeDisplaysResult]:
    """Computes SamplesDisplays in the supplied Circuit or Schedule.

    In contrast to `compute_displays`, this allows for sweeping over
    different parameter values.

    Args:
        program: The circuit or schedule to simulate.
        params: Parameters to run with the program.

    Returns:
        List of ComputeDisplaysResults for this run, one for each possible
        parameter resolver.
    """
    circuit = (program if isinstance(program, circuits.Circuit)
               else program.to_circuit())
    param_resolvers = study.to_resolvers(params or study.ParamResolver({}))

    compute_displays_results = []  # type: List[study.ComputeDisplaysResult]
    for param_resolver in param_resolvers:
        display_values = {}  # type: Dict[Hashable, Any]
        preceding_circuit = circuits.Circuit()
        for i, moment in enumerate(circuit):
            displays = (op for op in moment
                        if isinstance(op, ops.SamplesDisplay))
            for display in displays:
                measurement_key = str(display.key)
                measurement_circuit = circuits.Circuit.from_ops(
                    display.measurement_basis_change(),
                    ops.measure(*display.qubits, key=measurement_key))
                measurements = self._run(
                    preceding_circuit + measurement_circuit,
                    param_resolver,
                    display.num_samples)
                display_values[display.key] = (
                    display.value_derived_from_samples(
                        measurements[measurement_key]))
            preceding_circuit.append(circuit[i])
        compute_displays_results.append(study.ComputeDisplaysResult(
            params=param_resolver,
            display_values=display_values))

    return compute_displays_results

def _create_simulator_trial_result(self,
                                   params: study.ParamResolver,
                                   measurements: Dict[str, np.ndarray],
                                   final_simulator_state: Any
                                   ) -> 'SimulationTrialResult':
    """This method can be overridden to customize the creation of a trial
    result.

    Args:
        params: The ParamResolver for this trial.
        measurements: The measurement results for this trial.
        final_simulator_state: The final state of the simulator for the
            StepResult.

    Returns:
        The SimulationTrialResult.
    """
    return SimulationTrialResult(
        params=params,
        measurements=measurements,
        final_simulator_state=final_simulator_state)

def plot(self, **plot_kwargs: Any) -> None:
    """Plots excited state probability vs the Rabi angle (angle of
    rotation around the x-axis).

    Args:
        **plot_kwargs: Arguments to be passed to matplotlib.pyplot.plot.
    """
    fig = plt.figure()
    plt.plot(self._rabi_angles, self._excited_state_probs, 'ro-',
             figure=fig, **plot_kwargs)
    plt.xlabel(r"Rabi Angle (Radian)", figure=fig)
    plt.ylabel('Excited State Probability', figure=fig)
    fig.show()

def plot(self, **plot_kwargs: Any) -> None:
    """Plots the average ground state probability vs the number of
    Cliffords in the RB study.

    Args:
        **plot_kwargs: Arguments to be passed to matplotlib.pyplot.plot.
    """
    fig = plt.figure()
    plt.plot(self._num_cfds_seq, self._gnd_state_probs, 'ro-',
             figure=fig, **plot_kwargs)
    plt.xlabel(r"Number of Cliffords", figure=fig)
    plt.ylabel('Ground State Probability', figure=fig)
    fig.show()

def reflection_matrix_pow(reflection_matrix: np.ndarray, exponent: float):
    """Raises a matrix with two opposing eigenvalues to a power.

    Args:
        reflection_matrix: The matrix to raise to a power.
        exponent: The power to raise the matrix to.

    Returns:
        The given matrix raised to the given power.
    """

    # The eigenvalues are x and -x for some complex unit x. Determine x.
    squared_phase = np.dot(reflection_matrix[:, 0],
                           reflection_matrix[0, :])
    phase = complex(np.sqrt(squared_phase))

    # Extract +x and -x eigencomponents of the matrix.
    i = np.eye(reflection_matrix.shape[0]) * phase
    pos_part = (i + reflection_matrix) * 0.5
    neg_part = (i - reflection_matrix) * 0.5

    # Raise the matrix to a power by raising its eigencomponents to that
    # power.
    pos_factor = phase**(exponent - 1)
    neg_factor = pos_factor * complex(-1)**exponent
    pos_part_raised = pos_factor * pos_part
    neg_part_raised = neg_part * neg_factor

    return pos_part_raised + neg_part_raised

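For instance, the Pauli X matrix is such a reflection (eigenvalues +1 and -1), so raising it to the 0.5 power yields a square root of X; a quick numeric check, assuming numpy as np:

X = np.array([[0, 1], [1, 0]])
sqrt_X = reflection_matrix_pow(X, 0.5)
assert np.allclose(sqrt_X @ sqrt_X, X)
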
def match_global_phase(a: np.ndarray,
                       b: np.ndarray
                       ) -> Tuple[np.ndarray, np.ndarray]:
    """Phases the given matrices so that they agree on the phase of one
    entry.

    To maximize precision, the position with the largest entry from one of
    the matrices is used when attempting to compute the phase difference
    between the two matrices.

    Args:
        a: A numpy array.
        b: Another numpy array.

    Returns:
        A tuple (a', b') where a' == b' implies a == b*exp(i t) for some t.
    """

    # Not much point when they have different shapes.
    if a.shape != b.shape:
        return a, b

    # Find the entry with the largest magnitude in one of the matrices.
    k = max(np.ndindex(*a.shape), key=lambda t: abs(b[t]))

    def dephase(v):
        r = np.real(v)
        i = np.imag(v)

        # Avoid introducing floating point error when axis-aligned.
        if i == 0:
            return -1 if r < 0 else 1
        if r == 0:
            return 1j if i < 0 else -1j

        return np.exp(-1j * np.arctan2(i, r))

    # Zero the phase at this entry in both matrices.
    return a * dephase(a[k]), b * dephase(b[k])

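A sketch of how this is typically used to compare unitaries that differ only by an unobservable global phase, assuming numpy as np:

u = np.eye(2, dtype=complex)
v = np.exp(0.7j) * u            # same operation, different global phase
u2, v2 = match_global_phase(u, v)
assert np.allclose(u2, v2)
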
def is_diagonal(matrix: np.ndarray, *, atol: float = 1e-8) -> bool:
    """Determines if a matrix is approximately diagonal.

    A matrix is diagonal if i!=j implies m[i,j]==0.

    Args:
        matrix: The matrix to check.
        atol: The per-matrix-entry absolute tolerance on equality.

    Returns:
        Whether the matrix is diagonal within the given tolerance.
    """
    matrix = np.copy(matrix)
    for i in range(min(matrix.shape)):
        matrix[i, i] = 0
    return tolerance.all_near_zero(matrix, atol=atol)

def is_hermitian(matrix: np.ndarray,
                 *,
                 rtol: float = 1e-5,
                 atol: float = 1e-8) -> bool:
    """Determines if a matrix is approximately Hermitian.

    A matrix is Hermitian if it's square and equal to its adjoint.

    Args:
        matrix: The matrix to check.
        rtol: The per-matrix-entry relative tolerance on equality.
        atol: The per-matrix-entry absolute tolerance on equality.

    Returns:
        Whether the matrix is Hermitian within the given tolerance.
    """
    return (matrix.shape[0] == matrix.shape[1] and
            np.allclose(matrix, np.conj(matrix.T), rtol=rtol, atol=atol))

def is_orthogonal(matrix: np.ndarray,
                  *,
                  rtol: float = 1e-5,
                  atol: float = 1e-8) -> bool:
    """Determines if a matrix is approximately orthogonal.

    A matrix is orthogonal if it's square and real and its transpose is
    its inverse.

    Args:
        matrix: The matrix to check.
        rtol: The per-matrix-entry relative tolerance on equality.
        atol: The per-matrix-entry absolute tolerance on equality.

    Returns:
        Whether the matrix is orthogonal within the given tolerance.
    """
    return (matrix.shape[0] == matrix.shape[1] and
            np.all(np.imag(matrix) == 0) and
            np.allclose(matrix.dot(matrix.T),
                        np.eye(matrix.shape[0]),
                        rtol=rtol,
                        atol=atol))

def is_special_orthogonal(matrix: np.ndarray,
                          *,
                          rtol: float = 1e-5,
                          atol: float = 1e-8) -> bool:
    """Determines if a matrix is approximately special orthogonal.

    A matrix is special orthogonal if it is square and real and its
    transpose is its inverse and its determinant is one.

    Args:
        matrix: The matrix to check.
        rtol: The per-matrix-entry relative tolerance on equality.
        atol: The per-matrix-entry absolute tolerance on equality.

    Returns:
        Whether the matrix is special orthogonal within the given
        tolerance.
    """
    return (is_orthogonal(matrix, rtol=rtol, atol=atol) and
            (matrix.shape[0] == 0 or
             np.allclose(np.linalg.det(matrix), 1, rtol=rtol, atol=atol)))

def is_unitary(matrix: np.ndarray,
               *,
               rtol: float = 1e-5,
               atol: float = 1e-8) -> bool:
    """Determines if a matrix is approximately unitary.

    A matrix is unitary if it's square and its adjoint is its inverse.

    Args:
        matrix: The matrix to check.
        rtol: The per-matrix-entry relative tolerance on equality.
        atol: The per-matrix-entry absolute tolerance on equality.

    Returns:
        Whether the matrix is unitary within the given tolerance.
    """
    return (matrix.shape[0] == matrix.shape[1] and
            np.allclose(matrix.dot(np.conj(matrix.T)),
                        np.eye(matrix.shape[0]),
                        rtol=rtol,
                        atol=atol))

def is_special_unitary(matrix: np.ndarray,
                       *,
                       rtol: float = 1e-5,
                       atol: float = 1e-8) -> bool:
    """Determines if a matrix is approximately unitary with unit
    determinant.

    A matrix is special-unitary if it is square and its adjoint is its
    inverse and its determinant is one.

    Args:
        matrix: The matrix to check.
        rtol: The per-matrix-entry relative tolerance on equality.
        atol: The per-matrix-entry absolute tolerance on equality.

    Returns:
        Whether the matrix is unitary with unit determinant within the
        given tolerance.
    """
    return (is_unitary(matrix, rtol=rtol, atol=atol) and
            (matrix.shape[0] == 0 or
             np.allclose(np.linalg.det(matrix), 1, rtol=rtol, atol=atol)))

def commutes(m1: np.ndarray,
             m2: np.ndarray,
             *,
             rtol: float = 1e-5,
             atol: float = 1e-8) -> bool:
    """Determines if two matrices approximately commute.

    Two matrices A and B commute if they are square and have the same size
    and AB = BA.

    Args:
        m1: One of the matrices.
        m2: The other matrix.
        rtol: The per-matrix-entry relative tolerance on equality.
        atol: The per-matrix-entry absolute tolerance on equality.

    Returns:
        Whether the two matrices have compatible sizes and a commutator
        equal to zero within tolerance.
    """
    return (m1.shape[0] == m1.shape[1] and
            m1.shape == m2.shape and
            np.allclose(m1.dot(m2), m2.dot(m1), rtol=rtol, atol=atol))

def allclose_up_to_global_phase(
        a: np.ndarray,
        b: np.ndarray,
        *,
        rtol: float = 1.e-5,
        atol: float = 1.e-8,
        equal_nan: bool = False
) -> bool:
    """Determines if a ~= b * exp(i t) for some t.

    Args:
        a: A numpy array.
        b: Another numpy array.
        rtol: Relative error tolerance.
        atol: Absolute error tolerance.
        equal_nan: Whether or not NaN entries should be considered equal
            to other NaN entries.
    """
    a, b = transformations.match_global_phase(a, b)

    # Should now be equivalent.
    return np.allclose(a=a, b=b, rtol=rtol, atol=atol, equal_nan=equal_nan)

def __init__(self, time: Timestamp, duration: Union[Duration, timedelta],
             operation: ops.Operation) -> None:
    """Initializes the scheduled operation.

    Args:
        time: When the operation starts.
        duration: How long the operation lasts.
        operation: The operation.
    """
    self.time = time
    self.duration = Duration.create(duration)
    self.operation = operation

def __init__(self, terms: Mapping[raw_types.Gate, value.Scalar]) -> None:
    """Initializes linear combination from a collection of terms.

    Args:
        terms: Mapping of gates to coefficients in the linear combination
            being initialized.
    """
    super().__init__(terms, validator=self._is_compatible)

def on(self, *qubits: raw_types.Qid
       ) -> 'SingleQubitPauliStringGateOperation':
    """Returns an application of this gate to the given qubits.

    Args:
        *qubits: The collection of qubits to potentially apply the gate
            to.
    """
    if len(qubits) != 1:
        raise ValueError(
            'Expected a single qubit, got <{!r}>.'.format(qubits))
    from cirq.ops.pauli_string import SingleQubitPauliStringGateOperation
    return SingleQubitPauliStringGateOperation(self, qubits[0])

def sorted_by(key: Callable[[raw_types.Qid], Any]) -> 'QubitOrder':
    """A basis that orders qubits ascending based on a key function.

    Args:
        key: A function that takes a qubit and returns a key value. The
            basis will be ordered ascending according to these key values.

    Returns:
        A basis that orders qubits ascending based on a key function.
    """
    return QubitOrder(lambda qubits: tuple(sorted(qubits, key=key)))

def order_for(self, qubits: Iterable[raw_types.Qid]
              ) -> Tuple[raw_types.Qid, ...]:
    """Returns a qubit tuple ordered corresponding to the basis.

    Args:
        qubits: Qubits that should be included in the basis. (Additional
            qubits may be added into the output by the basis.)

    Returns:
        A tuple of qubits in the same order that their single-qubit
        matrices would be passed into `np.kron` when producing a matrix
        for the entire system.
    """
    return self._explicit_func(qubits)

def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList'
                   ) -> 'QubitOrder':
    """Converts a value into a basis.

    Args:
        val: An iterable or a basis.

    Returns:
        The basis implied by the value.
    """
    if isinstance(val, collections.Iterable):
        return QubitOrder.explicit(val)
    if isinstance(val, QubitOrder):
        return val
    raise ValueError(
        "Don't know how to interpret <{}> as a Basis.".format(val))

def range(*args, prefix: str):
    """Returns a range of NamedQubits.

    The range returned starts with the prefix, and is followed by a qubit
    for each number in the range, e.g.:

        NamedQubit.range(3, prefix="a") -> ["a0", "a1", "a2"]
        NamedQubit.range(2, 4, prefix="a") -> ["a2", "a3"]

    Args:
        *args: Args to be passed to Python's standard range function.
        prefix: A prefix for constructed NamedQubits.

    Returns:
        A list of NamedQubits.
    """
    return [NamedQubit(prefix + str(i)) for i in range(*args)]

def _is_trivial_angle(rad: float, atol: float) -> bool:
    """Tests if a circuit for an operator exp(i*rad*XX) (or YY, or ZZ) can
    be performed with a whole CZ.

    Args:
        rad: The angle in radians, assumed to be in the range
            [-pi/4, pi/4].
        atol: The absolute tolerance used for the comparison.
    """
    return abs(rad) < atol or abs(abs(rad) - np.pi / 4) < atol

def __init__(self, *, dtype=np.complex64):
    """A sparse matrix simulator.

    Args:
        dtype: The `numpy.dtype` used by the simulation. One of
            `numpy.complex64` or `numpy.complex128`.
    """
    if dtype not in {np.complex64, np.complex128}:
        raise ValueError(
            'dtype must be complex64 or complex128 but was {}'.format(
                dtype))
    self._dtype = dtype

def complete_acquaintance_strategy(qubit_order: Sequence[ops.Qid],
                                   acquaintance_size: int = 0,
                                   ) -> circuits.Circuit:
    """Returns an acquaintance strategy capable of executing a gate
    corresponding to any set of at most acquaintance_size qubits.

    Args:
        qubit_order: The qubits on which the strategy should be defined.
        acquaintance_size: The maximum number of qubits to be acted on by
            an operation.

    Returns:
        A circuit capable of implementing any set of k-local operations.
    """
    if acquaintance_size < 0:
        raise ValueError('acquaintance_size must be non-negative.')
    elif acquaintance_size == 0:
        return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)

    if acquaintance_size > len(qubit_order):
        return circuits.Circuit(device=UnconstrainedAcquaintanceDevice)
    if acquaintance_size == len(qubit_order):
        return circuits.Circuit.from_ops(
            acquaint(*qubit_order), device=UnconstrainedAcquaintanceDevice)

    strategy = circuits.Circuit.from_ops(
        (acquaint(q) for q in qubit_order),
        device=UnconstrainedAcquaintanceDevice)
    for size_to_acquaint in range(2, acquaintance_size + 1):
        expose_acquaintance_gates(strategy)
        replace_acquaintance_with_swap_network(
            strategy, qubit_order, size_to_acquaint)
    return strategy

def get_unhidden_ungenerated_python_files(directory: str) -> Iterable[str]:
    """Iterates through relevant python files within the given directory.

    Args:
        directory: The top-level directory to explore.

    Yields:
        File paths.
    """
    for dirpath, dirnames, filenames in os.walk(directory, topdown=True):
        if os.path.split(dirpath)[-1].startswith('.'):
            dirnames.clear()
            continue

        for filename in filenames:
            if filename.endswith('.py') and not filename.endswith('_pb2.py'):
                yield os.path.join(dirpath, filename)

def create_virtual_env(venv_path: str,
                       requirements_paths: Iterable[str],
                       python_path: str,
                       verbose: bool) -> None:
    """Creates a new virtual environment and then installs dependencies.

    Args:
        venv_path: Where to put the virtual environment's state.
        requirements_paths: Location of requirements files to -r install.
        python_path: The python binary to use.
        verbose: When set, more progress output is produced.
    """
    shell_tools.run_cmd('virtualenv',
                        None if verbose else '--quiet',
                        '-p',
                        python_path,
                        venv_path,
                        out=sys.stderr)
    pip_path = os.path.join(venv_path, 'bin', 'pip')
    for req_path in requirements_paths:
        shell_tools.run_cmd(pip_path,
                            'install',
                            None if verbose else '--quiet',
                            '-r',
                            req_path,
                            out=sys.stderr)

def derive_temporary_python2_environment(
        destination_directory: str,
        python3_environment: PreparedEnv,
        verbose: bool,
        env_name: str = '.test_virtualenv_py2',
        python_path: str = "/usr/bin/python2.7") -> PreparedEnv:
    """Creates a python 2.7 environment starting from a prepared python 3
    one.

    Args:
        destination_directory: Where to put the python 2 environment.
        python3_environment: The prepared environment to start from.
        verbose: When set, more progress output is produced.
        env_name: The name to use for the virtualenv directory.
        python_path: The python binary to use.

    Returns:
        A description of the environment that was prepared.
    """
    shutil.rmtree(destination_directory)
    input_directory = cast(str, python3_environment.destination_directory)
    os.chdir(input_directory)
    conversion_script_path = os.path.join(
        input_directory, 'dev_tools', 'python2.7-generate.sh')
    shell_tools.run_cmd('bash',
                        conversion_script_path,
                        destination_directory,
                        input_directory,
                        python3_environment.virtual_env_path,
                        out=sys.stderr)
    os.chdir(destination_directory)

    # Create virtual environment.
    env_path = os.path.join(destination_directory, env_name)
    # (These files are output by dev_tools/python2.7-generate.sh.)
    req_path = os.path.join(destination_directory, 'requirements.txt')
    dev_req_path = os.path.join(destination_directory,
                                'pip-list-test-tools.txt')
    contrib_req_path = os.path.join(destination_directory,
                                    'cirq', 'contrib',
                                    'contrib-requirements.txt')
    req_paths = [req_path, dev_req_path, contrib_req_path]
    create_virtual_env(venv_path=env_path,
                       python_path=python_path,
                       requirements_paths=req_paths,
                       verbose=verbose)

    return PreparedEnv(github_repo=python3_environment.repository,
                       actual_commit_id=python3_environment.actual_commit_id,
                       compare_commit_id=python3_environment.compare_commit_id,
                       destination_directory=destination_directory,
                       virtual_env_path=env_path)

def line_on_device(
        device: 'cirq.google.XmonDevice',
        length: int,
        method: LinePlacementStrategy = greedy.GreedySequenceSearchStrategy()
) -> GridQubitLineTuple:
    """Searches for linear sequence of qubits on device.

    Args:
        device: Google Xmon device instance.
        length: Desired number of qubits making up the line.
        method: Line placement method. Defaults to
            cirq.greedy.GreedySequenceSearchMethod.

    Returns:
        Line sequences search results.
    """
    return method.place_line(device, length)

def render(self, width: int, height: int) -> List[str]:
    """Returns a list of text lines representing the block's contents.

    Args:
        width: The width of the output text. Must be at least as large as
            the block's minimum width.
        height: The height of the output text. Must be at least as large
            as the block's minimum height.

    Returns:
        Text pre-split into lines.
    """
    if width == 0 or height == 0:
        return [''] * height

    out_chars = [[' '] * width for _ in range(height)]
    mid_x = int((width - 1) * self.horizontal_alignment)
    mid_y = (height - 1) // 2

    # Horizontal line legs.
    if self.left:
        out_chars[mid_y][:mid_x + 1] = self.left * (mid_x + 1)
    if self.right:
        out_chars[mid_y][mid_x:] = self.right * (width - mid_x)

    # Vertical line legs.
    if self.top:
        for y in range(mid_y + 1):
            out_chars[y][mid_x] = self.top
    if self.bottom:
        for y in range(mid_y, height):
            out_chars[y][mid_x] = self.bottom

    # Central content.
    mid = self.content or self.center
    if self.content or self.center:
        content_lines = mid.split('\n')
        y = mid_y - (len(content_lines) - 1) // 2
        for dy, content_line in enumerate(content_lines):
            s = int((len(content_line) - 1) * self.horizontal_alignment)
            x = mid_x - s
            for dx, c in enumerate(content_line):
                out_chars[y + dy][x + dx] = c

    return [''.join(line) for line in out_chars]

def _create_array(self, arr: np.ndarray) -> int:
    """Returns the handle of a RawArray created from the given numpy array.

    Args:
        arr: A numpy ndarray.

    Returns:
        The handle (int) of the array.

    Raises:
        ValueError: if arr is not a ndarray or of an unsupported dtype. If
            the array is of an unsupported type, using a view of the array
            to another dtype and then converting on get is often a
            workaround.
    """
    if not isinstance(arr, np.ndarray):
        raise ValueError('Array is not a numpy ndarray.')
    try:
        c_arr = np.ctypeslib.as_ctypes(arr)
    except (KeyError, NotImplementedError):
        raise ValueError(
            'Array has unsupported dtype {}.'.format(arr.dtype))

    # pylint: disable=protected-access
    raw_arr = RawArray(c_arr._type_, c_arr)
    with self._lock:
        if self._count >= len(self._arrays):
            self._arrays += len(self._arrays) * [None]
        self._get_next_free()

        # Note storing the shape is a workaround for an issue encountered
        # when upgrading to numpy 1.15.
        # See https://github.com/numpy/numpy/issues/11636
        self._arrays[self._current] = (raw_arr, arr.shape)

        self._count += 1

    return self._current

def _free_array(self, handle: int):
    """Frees the memory for the array with the given handle.

    Args:
        handle: The handle of the array whose memory should be freed. This
            handle must come from the _create_array method.
    """
    with self._lock:
        if self._arrays[handle] is not None:
            self._arrays[handle] = None
            self._count -= 1

def _get_array(self, handle: int) -> np.ndarray:
    """Returns the array with the given handle.

    Args:
        handle: The handle of the array to return. This handle must come
            from the _create_array method.

    Returns:
        The numpy ndarray with the handle given from _create_array.
    """
    tup = self._arrays[handle]
    assert tup is not None
    c_arr, shape = tup
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)
        result = np.ctypeslib.as_array(c_arr)
    result.shape = shape
    return result

A QASM gate representing any single-qubit unitary as a series of three rotations: Z, Y, and Z. The angles are normalized to the range [0, 2) half_turns. Args: lmda: Half turns to rotate about Z (applied first). theta: Half turns to rotate about Y. phi: Half turns to rotate about Z (applied last).
def __init__(self, lmda, theta, phi) -> None: self.lmda = lmda % 2 self.theta = theta % 2 self.phi = phi % 2
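A hedged sketch of the normalization, assuming the enclosing class is cirq's QasmUGate (only the modulo arithmetic shown above is exercised):

# Negative and out-of-range angles wrap into [0, 2) half-turns.
gate = QasmUGate(lmda=-0.5, theta=1.0, phi=2.25)
assert (gate.lmda, gate.theta, gate.phi) == (1.5, 1.0, 0.25)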
126,802
Splits moments so that they contain either only acquaintance gates or only permutation gates. Orders the resulting moments so that the first one is of the same type as the previous one. Args: circuit: The acquaintance strategy to rectify. acquaint_first: Whether to make the acquaintance moment come first when splitting the first mixed moment.
def rectify_acquaintance_strategy( circuit: circuits.Circuit, acquaint_first: bool = True ) -> None: if not is_acquaintance_strategy(circuit): raise TypeError('not is_acquaintance_strategy(circuit)') rectified_moments = [] for moment in circuit: gate_type_to_ops = collections.defaultdict( list)  # type: Dict[bool, List[ops.GateOperation]] for op in moment.operations: gate_type_to_ops[isinstance(op.gate, AcquaintanceOpportunityGate) ].append(op) if len(gate_type_to_ops) == 1: rectified_moments.append(moment) continue # Reusing acquaint_first as the loop variable is deliberate: each # split moment after the first then leads with the same gate type # that ended the previous split. for acquaint_first in sorted(gate_type_to_ops.keys(), reverse=acquaint_first): rectified_moments.append( ops.Moment(gate_type_to_ops[acquaint_first])) circuit._moments = rectified_moments
126,814
Check if a gate is a native ion gate. Args: gate: Input gate. Returns: True if the gate is native to the ion, false otherwise.
def is_native_ion_gate(gate: ops.Gate) -> bool: return isinstance(gate, (ops.XXPowGate, ops.MeasurementGate, ops.XPowGate, ops.YPowGate, ops.ZPowGate))
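A hedged usage sketch: cirq.X is an XPowGate instance and therefore native, while cirq.H is an HPowGate and is not.

import cirq

assert is_native_ion_gate(cirq.X)
assert not is_native_ion_gate(cirq.H)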
126,833
Convert a single (one- or two-qubit) operation into ion trap native gates. Args: op: The gate operation to be converted. Returns: The desired operation implemented with ion trap gates.
def convert_one(self, op: ops.Operation) -> ops.OP_TREE: # Known gate name if not isinstance(op, ops.GateOperation): raise TypeError("{!r} is not a gate operation.".format(op)) if is_native_ion_gate(op.gate): return [op] # one choice of known Hadamard gate decomposition if isinstance(op.gate, ops.HPowGate) and op.gate.exponent == 1: return [ops.Rx(np.pi).on(op.qubits[0]), ops.Ry(-1 * np.pi / 2).on(op.qubits[0])] # one choice of known CNOT gate decomposition if isinstance(op.gate, ops.CNotPowGate) and op.gate.exponent == 1: return [ops.Ry(np.pi / 2).on(op.qubits[0]), MS(np.pi / 4).on(op.qubits[0], op.qubits[1]), ops.Rx(-1 * np.pi / 2).on(op.qubits[0]), ops.Rx(-1 * np.pi / 2).on(op.qubits[1]), ops.Ry(-1 * np.pi / 2).on(op.qubits[0])] # Known matrix mat = protocols.unitary(op, None) if len(op.qubits) <= 2 else None if mat is not None and len(op.qubits) == 1: gates = optimizers.single_qubit_matrix_to_phased_x_z(mat) return [g.on(op.qubits[0]) for g in gates] elif mat is not None and len(op.qubits) == 2: return two_qubit_matrix_to_ion_operations( op.qubits[0], op.qubits[1], mat) else: if self.ignore_failures: return [op] else: raise TypeError( "Don't know how to work with {!r}. " "It isn't a native Ion Trap operation, " "a 1 or 2 qubit gate with a known unitary, " "or composite.".format(op.gate))
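A hedged check of the Hadamard branch above, multiplying the two rotation unitaries directly (uses the older cirq spellings Rx/Ry that the code itself relies on, and cirq.allclose_up_to_global_phase):

import numpy as np
import cirq

# Operations apply left-to-right, so the combined unitary is Ry @ Rx.
u = cirq.unitary(cirq.Ry(-np.pi / 2)) @ cirq.unitary(cirq.Rx(np.pi))
assert cirq.allclose_up_to_global_phase(u, cirq.unitary(cirq.H))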
126,835
Returns an orthogonal matrix that diagonalizes the given matrix. Args: matrix: A real symmetric matrix to diagonalize. rtol: Relative numerical error threshold. atol: Absolute numerical error threshold. Returns: An orthogonal matrix P such that P.T @ matrix @ P is diagonal. Raises: ValueError: Matrix isn't real symmetric.
def diagonalize_real_symmetric_matrix( matrix: np.ndarray, *, rtol: float = 1e-5, atol: float = 1e-8) -> np.ndarray: # TODO: Determine if thresholds should be passed into is_hermitian if np.any(np.imag(matrix) != 0) or not predicates.is_hermitian(matrix): raise ValueError('Input must be real and symmetric.') _, result = np.linalg.eigh(matrix) return result
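A hedged usage sketch with a small real symmetric matrix:

import numpy as np

m = np.array([[2.0, 1.0], [1.0, 2.0]])
p = diagonalize_real_symmetric_matrix(m)
d = p.T @ m @ p
assert np.allclose(d, np.diag(np.diag(d)))  # off-diagonal entries vanish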
126,837
Splits range(length) into approximate equivalence classes. Args: length: The length of the range to split. comparator: Determines if two indices have approximately equal items. Returns: A list of (inclusive_start, exclusive_end) range endpoints. Each corresponds to a run of approximately-equivalent items.
def _contiguous_groups( length: int, comparator: Callable[[int, int], bool] ) -> List[Tuple[int, int]]: result = [] start = 0 while start < length: past = start + 1 while past < length and comparator(start, past): past += 1 result.append((start, past)) start = past return result
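A hedged usage sketch grouping equal neighbors in a concrete list:

items = [1, 1, 2, 3, 3, 3]
runs = _contiguous_groups(len(items), lambda i, j: items[i] == items[j])
assert runs == [(0, 2), (2, 3), (3, 6)]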
126,838
Initializes a new schedule. Args: device: The hardware this schedule will run on. scheduled_operations: Initial list of operations to apply. These will be moved into a sorted list, with a key equal to each operation's start time.
def __init__(self, device: Device, scheduled_operations: Iterable[ScheduledOperation] = () ) -> None: self.device = device self.scheduled_operations = SortedListWithKey(scheduled_operations, key=lambda e: e.time) self._max_duration = max( [e.duration for e in self.scheduled_operations] or [Duration()])
126,846
Finds operations overlapping a given time or time slice. Args: item: Either a Timestamp or a slice containing start and stop Timestamps. Returns: The scheduled operations that occur during the given time.
def __getitem__(self, item: Union[Timestamp, slice]): if isinstance(item, slice): if item.step: raise ValueError('Step not supported.') start = cast(Timestamp, item.start) stop = cast(Timestamp, item.stop) return self.query(time=start, duration=stop - start) return self.query(time=item, include_query_end_time=True)
126,849
Finds operations happening at the same time as the given operation. Args: scheduled_operation: The operation specifying the time to query. Returns: Scheduled operations that overlap with the given operation.
def operations_happening_at_same_time_as( self, scheduled_operation: ScheduledOperation ) -> List[ScheduledOperation]: overlaps = self.query( time=scheduled_operation.time, duration=scheduled_operation.duration) return [e for e in overlaps if e != scheduled_operation]
126,850
Adds a scheduled operation to the schedule. Args: scheduled_operation: The operation to add. Raises: ValueError: The operation collided with something already in the schedule.
def include(self, scheduled_operation: ScheduledOperation): collisions = self.query(time=scheduled_operation.time, duration=scheduled_operation.duration, qubits=scheduled_operation.operation.qubits) if collisions: raise ValueError('Operation {} has collisions: {}'.format( scheduled_operation.operation, collisions)) self.scheduled_operations.add(scheduled_operation) self._max_duration = max(self._max_duration, scheduled_operation.duration)
126,851
Omits a scheduled operation from the schedule, if present. Args: scheduled_operation: The operation to try to remove. Returns: True if the operation was present and is now removed, False if it was already not present.
def exclude(self, scheduled_operation: ScheduledOperation) -> bool: try: self.scheduled_operations.remove(scheduled_operation) return True except ValueError: return False
126,852
Breaks down a 2x2 unitary into more useful ZYZ angle parameters. Args: mat: The 2x2 unitary matrix to break down. Returns: A tuple containing the amount to phase around Z, then rotate around Y, then phase around Z (all in radians).
def deconstruct_single_qubit_matrix_into_angles( mat: np.ndarray) -> Tuple[float, float, float]: # Anti-cancel left-vs-right phase along top row. right_phase = cmath.phase(mat[0, 1] * np.conj(mat[0, 0])) + math.pi mat = np.dot(mat, _phase_matrix(-right_phase)) # Cancel top-vs-bottom phase along left column. bottom_phase = cmath.phase(mat[1, 0] * np.conj(mat[0, 0])) mat = np.dot(_phase_matrix(-bottom_phase), mat) # Lined up for a rotation. Clear the off-diagonal cells with one. rotation = math.atan2(abs(mat[1, 0]), abs(mat[0, 0])) mat = np.dot(_rotation_matrix(-rotation), mat) # Cancel top-left-vs-bottom-right phase. diagonal_phase = cmath.phase(mat[1, 1] * np.conj(mat[0, 0])) # Note: Ignoring global phase. return right_phase + diagonal_phase, rotation * 2, bottom_phase
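A hedged sanity check: the Hadamard is a half-pi Y rotation sandwiched between Z phases, so the middle returned angle comes out as pi/2 regardless of the phase conventions of the _phase_matrix helper (diagonal phases leave the entry magnitudes untouched).

import numpy as np

h = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
_, y_angle, _ = deconstruct_single_qubit_matrix_into_angles(h)
assert np.isclose(y_angle, np.pi / 2)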
126,863
Combines similar items into groups. Args: items: The list of items to group. comparer: Determines if two items are similar. Returns: A list of groups of items.
def _group_similar(items: List[T], comparer: Callable[[T, T], bool]) -> List[List[T]]: groups = [] # type: List[List[T]] used = set() # type: Set[int] for i in range(len(items)): if i not in used: group = [items[i]] for j in range(i + 1, len(items)): if j not in used and comparer(items[i], items[j]): used.add(j) group.append(items[j]) groups.append(group) return groups
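A hedged usage sketch: cluster floats that sit within 0.1 of a group's first member (note the comparer is only applied against each group's first item, so it need not be transitive).

values = [1.0, 1.05, 2.0, 1.02]
groups = _group_similar(values, lambda a, b: abs(a - b) < 0.1)
assert groups == [[1.0, 1.05, 1.02], [2.0]]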
126,864
Applies a function to the eigenvalues of a matrix. Given M = sum_k a_k |v_k><v_k|, returns f(M) = sum_k f(a_k) |v_k><v_k|. Args: matrix: The matrix to modify with the function. func: The function to apply to the eigenvalues of the matrix. rtol: Relative threshold used when separating eigenspaces. atol: Absolute threshold used when separating eigenspaces. Returns: The transformed matrix.
def map_eigenvalues( matrix: np.ndarray, func: Callable[[complex], complex], *, rtol: float = 1e-5, atol: float = 1e-8) -> np.ndarray: vals, vecs = _perp_eigendecompose(matrix, rtol=rtol, atol=atol) pieces = [np.outer(vec, np.conj(vec.T)) for vec in vecs] out_vals = np.vectorize(func)(vals.astype(complex)) total = np.zeros(shape=matrix.shape) for piece, val in zip(pieces, out_vals): total = np.add(total, piece * val) return total
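A hedged usage sketch, assuming the private _perp_eigendecompose helper is available alongside the function: taking a matrix square root by mapping each eigenvalue through np.sqrt.

import numpy as np

m = np.diag([4.0, 9.0])
root = map_eigenvalues(m, np.sqrt)
assert np.allclose(root @ root, m)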
126,866
Updates a mapping (in place) from qubits to logical indices according to a set of permutation gates. Any gates other than permutation gates are ignored. Args: mapping: The mapping to update. operations: The operations to update according to.
def update_mapping(mapping: Dict[ops.Qid, LogicalIndex], operations: ops.OP_TREE ) -> None: for op in ops.flatten_op_tree(operations): if (isinstance(op, ops.GateOperation) and isinstance(op.gate, PermutationGate)): op.gate.update_mapping(mapping, op.qubits)
126,872
Updates a mapping (in place) from qubits to logical indices. Args: mapping: The mapping to update. keys: The qubits acted on by the gate.
def update_mapping(self, mapping: Dict[ops.Qid, LogicalIndex], keys: Sequence[ops.Qid] ) -> None: permutation = self.permutation() indices = tuple(permutation.keys()) new_keys = [keys[permutation[i]] for i in indices] old_elements = [mapping[keys[i]] for i in indices] mapping.update(zip(new_keys, old_elements))
126,874
Adds text to the given location. Args: x: The column in which to write the text. y: The row in which to write the text. text: The text to write at location (x, y). transposed_text: Optional text to write instead, if the text diagram is transposed.
def write(self, x: int, y: int, text: str, transposed_text: 'Optional[str]' = None): entry = self.entries.get((x, y), _DiagramText('', '')) self.entries[(x, y)] = _DiagramText( entry.text + text, entry.transposed_text + (transposed_text if transposed_text else text))
126,882