Dataset columns: INSTRUCTION (string, lengths 1 to 46.3k) and RESPONSE (string, lengths 75 to 80.2k).
Gets a message from the requested kernel channel ('iopub' or 'shell'). Timeout is None by default; when the timeout is reached, the Empty exception is re-raised.
def get_message(self, stream, timeout=None): """ Function is used to get a message from the iopub channel. Timeout is None by default When timeout is reached """ try: if stream == 'iopub': msg = self.kc.get_iopub_msg(timeout=timeout) elif stream == 'shell': msg = self.kc.get_shell_msg(timeout=timeout) else: raise ValueError('Invalid stream specified: "%s"' % stream) except Empty: logger.debug('Kernel: Timeout waiting for message on %s', stream) raise logger.debug("Kernel message (%s):\n%s", stream, pformat(msg)) return msg
Executes a string of Python code as cell input. We do not allow the kernel to make requests to stdin, as is the norm for notebooks. Returns the unique message id of the execution request sent to the kernel.
def execute_cell_input(self, cell_input, allow_stdin=None): """ Executes a string of python code in cell input. We do not allow the kernel to make requests to the stdin this is the norm for notebooks Function returns a unique message id of the reply from the kernel. """ if cell_input: logger.debug('Executing cell: "%s"...', cell_input.splitlines()[0][:40]) else: logger.debug('Executing empty cell') return self.kc.execute(cell_input, allow_stdin=allow_stdin, stop_on_error=False)
Continuously poll the kernel 'shell' stream for messages until: - It receives an 'execute_reply' status for the given message id - The timeout is reached awaiting a message, in which case a `Queue.Empty` exception will be raised.
def await_reply(self, msg_id, timeout=None): """ Continuously poll the kernel 'shell' stream for messages until: - It receives an 'execute_reply' status for the given message id - The timeout is reached awaiting a message, in which case a `Queue.Empty` exception will be raised. """ while True: msg = self.get_message(stream='shell', timeout=timeout) # Is this the message we are waiting for? if msg['parent_header'].get('msg_id') == msg_id: if msg['content']['status'] == 'aborted': # This should not occur! raise RuntimeError('Kernel aborted execution request') return
Poll the iopub stream until an idle message is received for the given parent ID
def await_idle(self, parent_id, timeout): """Poll the iopub stream until an idle message is received for the given parent ID""" while True: # Get a message from the kernel iopub channel msg = self.get_message(timeout=timeout, stream='iopub') # raises Empty on timeout! if msg['parent_header'].get('msg_id') != parent_id: continue if msg['msg_type'] == 'status': if msg['content']['execution_state'] == 'idle': break
Instructs the kernel client to stop its channels and the kernel manager to shut down the kernel process.
def stop(self): """ Instructs the kernel process to stop channels and the kernel manager to then shutdown the process. """ logger.debug('Stopping kernel') self.kc.stop_channels() self.km.shutdown_kernel(now=True) del self.km
Get a list of index values for Validation set from a dataset Arguments: n : int, Total number of elements in the data set. cv_idx : int, starting index [idx_start = cv_idx*int(val_pct*n)] val_pct : (int, float), validation set percentage seed : seed value for RandomState Returns: list of indexes
def get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42): """ Get a list of index values for Validation set from a dataset Arguments: n : int, Total number of elements in the data set. cv_idx : int, starting index [idx_start = cv_idx*int(val_pct*n)] val_pct : (int, float), validation set percentage seed : seed value for RandomState Returns: list of indexes """ np.random.seed(seed) n_val = int(val_pct*n) idx_start = cv_idx*n_val idxs = np.random.permutation(n) return idxs[idx_start:idx_start+n_val]
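A minimal usage sketch (hypothetical numbers; assumes numpy is imported as np and get_cv_idxs above is in scope): carve a 20% validation split out of 1,000 samples and keep the rest for training.

import numpy as np

val_idxs = get_cv_idxs(1000, cv_idx=0, val_pct=0.2, seed=42)  # 200 shuffled indices
trn_idxs = np.setdiff1d(np.arange(1000), val_idxs)            # the remaining 800 indices
print(len(val_idxs), len(trn_idxs))                           # -> 200 800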
Enlarge or shrink a single image to scale, such that the smaller of the height or width dimension is equal to targ.
def resize_img(fname, targ, path, new_path, fn=None): """ Enlarge or shrink a single image to scale, such that the smaller of the height or width dimension is equal to targ. """ if fn is None: fn = resize_fn(targ) dest = os.path.join(path_for(path, new_path, targ), fname) if os.path.exists(dest): return im = Image.open(os.path.join(path, fname)).convert('RGB') os.makedirs(os.path.split(dest)[0], exist_ok=True) fn(im).save(dest)
Enlarge or shrink a set of images in the same directory to scale, such that the smaller of the height or width dimension is equal to targ. Note: -- This function is multithreaded for efficiency. -- When the destination file or folder already exists, the function exits without raising an error.
def resize_imgs(fnames, targ, path, new_path, resume=True, fn=None): """ Enlarge or shrink a set of images in the same directory to scale, such that the smaller of the height or width dimension is equal to targ. Note: -- This function is multithreaded for efficiency. -- When destination file or folder already exist, function exists without raising an error. """ target_path = path_for(path, new_path, targ) if resume: subdirs = {os.path.dirname(p) for p in fnames} subdirs = {s for s in subdirs if os.path.exists(os.path.join(target_path, s))} already_resized_fnames = set() for subdir in subdirs: files = [os.path.join(subdir, file) for file in os.listdir(os.path.join(target_path, subdir))] already_resized_fnames.update(set(files)) original_fnames = set(fnames) fnames = list(original_fnames - already_resized_fnames) errors = {} def safely_process(fname): try: resize_img(fname, targ, path, new_path, fn=fn) except Exception as ex: errors[fname] = str(ex) if len(fnames) > 0: with ThreadPoolExecutor(num_cpus()) as e: ims = e.map(lambda fname: safely_process(fname), fnames) for _ in tqdm(ims, total=len(fnames), leave=False): pass if errors: print('Some images failed to process:') print(json.dumps(errors, indent=2)) return os.path.join(path,new_path,str(targ))
Returns a list of relative file paths to `path` for all files within `folder`
def read_dir(path, folder): """ Returns a list of relative file paths to `path` for all files within `folder` """ full_path = os.path.join(path, folder) fnames = glob(f"{full_path}/*.*") directories = glob(f"{full_path}/*/") if any(fnames): return [os.path.relpath(f,path) for f in fnames] elif any(directories): raise FileNotFoundError("{} has subdirectories but contains no files. Is your directory structure correct?".format(full_path)) else: raise FileNotFoundError("{} folder doesn't exist or is empty".format(full_path))
Fetches the names of all files under path in long form, along with labels inferred from the enclosing directory names.
def read_dirs(path, folder): ''' Fetches name of all files in path in long form, and labels associated by extrapolation of directory names. ''' lbls, fnames, all_lbls = [], [], [] full_path = os.path.join(path, folder) for lbl in sorted(os.listdir(full_path)): if lbl not in ('.ipynb_checkpoints','.DS_Store'): all_lbls.append(lbl) for fname in os.listdir(os.path.join(full_path, lbl)): if fname not in ('.DS_Store'): fnames.append(os.path.join(folder, lbl, fname)) lbls.append(lbl) return fnames, lbls, all_lbls
One-hot encoding by index. Returns an array of length c, where all entries are 0, except for the indices in ids.
def n_hot(ids, c): ''' One-hot encoding by index. Returns an array of length c, where all entries are 0, except for the indices in ids ''' res = np.zeros((c,), dtype=np.float32) res[ids] = 1 return res
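For illustration (assumes numpy is imported and n_hot above is in scope), encoding indices 0 and 3 into a length-5 vector:

print(n_hot([0, 3], 5))  # -> [1. 0. 0. 1. 0.]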
Returns the filenames and labels for a folder within a path Returns: ------- fnames: a list of the filenames within `folder` all_lbls: a list of all of the labels in `folder`, where the # of labels is determined by the # of directories within `folder` lbl_arr: a numpy array of the label indices in `all_lbls`
def folder_source(path, folder): """ Returns the filenames and labels for a folder within a path Returns: ------- fnames: a list of the filenames within `folder` all_lbls: a list of all of the labels in `folder`, where the # of labels is determined by the # of directories within `folder` lbl_arr: a numpy array of the label indices in `all_lbls` """ fnames, lbls, all_lbls = read_dirs(path, folder) lbl2idx = {lbl:idx for idx,lbl in enumerate(all_lbls)} idxs = [lbl2idx[lbl] for lbl in lbls] lbl_arr = np.array(idxs, dtype=int) return fnames, lbl_arr, all_lbls
Parse filenames and label sets from a CSV file. This method expects that the csv file at path :fn: has two columns. If it has a header, :skip_header: should be set to True. The labels in the label set are expected to be space separated. Arguments: fn: Path to a CSV file. skip_header: A boolean flag indicating whether to skip the header. Returns: a two-tuple of ( image filenames, a dictionary of filenames and corresponding labels ) . :param cat_separator: the separator for the categories column
def parse_csv_labels(fn, skip_header=True, cat_separator = ' '): """Parse filenames and label sets from a CSV file. This method expects that the csv file at path :fn: has two columns. If it has a header, :skip_header: should be set to True. The labels in the label set are expected to be space separated. Arguments: fn: Path to a CSV file. skip_header: A boolean flag indicating whether to skip the header. Returns: a two-tuple of ( image filenames, a dictionary of filenames and corresponding labels ) . :param cat_separator: the separator for the categories column """ df = pd.read_csv(fn, index_col=0, header=0 if skip_header else None, dtype=str) fnames = df.index.values df.iloc[:,0] = df.iloc[:,0].str.split(cat_separator) return fnames, list(df.to_dict().values())[0]
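A small, hypothetical round trip (assumes pandas is imported as pd and parse_csv_labels above is in scope; 'labels.csv' is an illustrative file name):

import pandas as pd

pd.DataFrame({'fn': ['a.jpg', 'b.jpg'], 'tags': ['cat', 'cat dog']}).to_csv('labels.csv', index=False)
fnames, csv_labels = parse_csv_labels('labels.csv')
print(list(fnames))          # -> ['a.jpg', 'b.jpg']
print(csv_labels['b.jpg'])   # -> ['cat', 'dog']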
True if the fn points to a DICOM image
def isdicom(fn): '''True if the fn points to a DICOM image''' fn = str(fn) if fn.endswith('.dcm'): return True # Dicom signature from the dicom spec. with open(fn,'rb') as fh: fh.seek(0x80) return fh.read(4)==b'DICM'
Opens an image using OpenCV given the file path. Arguments: fn: the file path of the image Returns: The image in RGB format as a numpy array of floats normalized to the range 0.0 to 1.0
def open_image(fn): """ Opens an image using OpenCV given the file path. Arguments: fn: the file path of the image Returns: The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0 """ flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR if not os.path.exists(fn) and not str(fn).startswith("http"): raise OSError('No such file or directory: {}'.format(fn)) elif os.path.isdir(fn) and not str(fn).startswith("http"): raise OSError('Is a directory: {}'.format(fn)) elif isdicom(fn): slice = pydicom.read_file(fn) if slice.PhotometricInterpretation.startswith('MONOCHROME'): # Make a fake RGB image im = np.stack([slice.pixel_array]*3,-1) return im / ((1 << slice.BitsStored)-1) else: # No support for RGB yet, as it involves various color spaces. # It shouldn't be too difficult to add though, if needed. raise OSError('Unsupported DICOM image with PhotometricInterpretation=={}'.format(slice.PhotometricInterpretation)) else: #res = np.array(Image.open(fn), dtype=np.float32)/255 #if len(res.shape)==2: res = np.repeat(res[...,None],3,2) #return res try: if str(fn).startswith("http"): req = urllib.urlopen(str(fn)) image = np.asarray(bytearray(req.read()), dtype="uint8") im = cv2.imdecode(image, flags).astype(np.float32)/255 else: im = cv2.imread(str(fn), flags).astype(np.float32)/255 if im is None: raise OSError(f'File not recognized by opencv: {fn}') return cv2.cvtColor(im, cv2.COLOR_BGR2RGB) except Exception as e: raise OSError('Error handling image at: {}'.format(fn)) from e
Splits each array passed in *a into a pair of arrays: (elements selected by idxs, the remaining elements). This can be used to split multiple arrays containing training data into training and validation sets. :param idxs [int]: list of indexes selected :param a list: list of np.array, each array should have the same number of elements in the first dimension :return: list of tuples, each containing a split of the corresponding array from *a. The first element of each tuple is an array composed of the elements selected by idxs; the second element is an array of the remaining elements.
def split_by_idx(idxs, *a): """ Split each array passed as *a, to a pair of arrays like this (elements selected by idxs, the remaining elements) This can be used to split multiple arrays containing training data to validation and training set. :param idxs [int]: list of indexes selected :param a list: list of np.array, each array should have same amount of elements in the first dimension :return: list of tuples, each containing a split of corresponding array from *a. First element of each tuple is an array composed from elements selected by idxs, second element is an array of remaining elements. """ mask = np.zeros(len(a[0]),dtype=bool) mask[np.array(idxs)] = True return [(o[mask],o[~mask]) for o in a]
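A quick sketch of the split (assumes numpy is imported and split_by_idx above is in scope):

import numpy as np

x = np.arange(10).reshape(5, 2)
y = np.array([10, 11, 12, 13, 14])
(val_x, trn_x), (val_y, trn_y) = split_by_idx([0, 3], x, y)
print(val_y, trn_y)  # -> [10 13] [11 12 14]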
resize all images in the dataset and save them to `new_path` Arguments: targ (int): the target size new_path (string): the new folder to save the images resume (bool): if true (default), allow resuming a partial resize operation by checking for the existence of individual images rather than the existence of the directory fn (function): custom resizing function Img -> Img
def resize_imgs(self, targ, new_path, resume=True, fn=None): """ resize all images in the dataset and save them to `new_path` Arguments: targ (int): the target size new_path (string): the new folder to save the images resume (bool): if true (default), allow resuming a partial resize operation by checking for the existence of individual images rather than the existence of the directory fn (function): custom resizing function Img -> Img """ dest = resize_imgs(self.fnames, targ, self.path, new_path, resume, fn) return self.__class__(self.fnames, self.y, self.transform, dest)
Reverse the normalization done to a batch of images. Arguments: arr: of shape/size (N,3,sz,sz)
def denorm(self,arr): """Reverse the normalization done to a batch of images. Arguments: arr: of shape/size (N,3,sz,sz) """ if type(arr) is not np.ndarray: arr = to_np(arr) if len(arr.shape)==3: arr = arr[None] return self.transform.denorm(np.rollaxis(arr,1,4))
Return a copy of this dataset resized
def resized(self, dl, targ, new_path, resume = True, fn=None): """ Return a copy of this dataset resized """ return dl.dataset.resize_imgs(targ, new_path, resume=resume, fn=fn) if dl else None
Resizes all the images in the train, valid, test folders to a given size. Arguments: targ_sz (int): the target size new_path (str): the path to save the resized images (default tmp) resume (bool): if True, check for images in the DataSet that haven't been resized yet (useful if a previous resize operation was aborted) fn (function): optional custom resizing function
def resize(self, targ_sz, new_path='tmp', resume=True, fn=None): """ Resizes all the images in the train, valid, test folders to a given size. Arguments: targ_sz (int): the target size new_path (str): the path to save the resized images (default tmp) resume (bool): if True, check for images in the DataSet that haven't been resized yet (useful if a previous resize operation was aborted) fn (function): optional custom resizing function """ new_ds = [] dls = [self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl] if self.test_dl: dls += [self.test_dl, self.test_aug_dl] else: dls += [None,None] t = tqdm_notebook(dls) for dl in t: new_ds.append(self.resized(dl, targ_sz, new_path, resume, fn)) t.close() return self.__class__(new_ds[0].path, new_ds, self.bs, self.num_workers, self.classes)
Read in images and their labels given as numpy arrays Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) trn: a tuple of training data matrix and target label/classification array (e.g. `trn=(x,y)` where `x` has the shape of `(5000, 784)` and `y` has the shape of `(5000,)`) val: a tuple of validation data matrix and target label/classification array. bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` classes: a list of all labels/classifications num_workers: a number of workers test: a matrix of test data (the shape should match `trn[0]`) Returns: ImageClassifierData
def from_arrays(cls, path, trn, val, bs=64, tfms=(None,None), classes=None, num_workers=4, test=None, continuous=False): """ Read in images and their labels given as numpy arrays Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) trn: a tuple of training data matrix and target label/classification array (e.g. `trn=(x,y)` where `x` has the shape of `(5000, 784)` and `y` has the shape of `(5000,)`) val: a tuple of validation data matrix and target label/classification array. bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` classes: a list of all labels/classifications num_workers: a number of workers test: a matrix of test data (the shape should match `trn[0]`) Returns: ImageClassifierData """ f = ArraysIndexRegressionDataset if continuous else ArraysIndexDataset datasets = cls.get_ds(f, trn, val, tfms, test=test) return cls(path, datasets, bs, num_workers, classes=classes)
Read in images and their labels given as sub-folder names Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` trn_name: a name of the folder that contains training images. val_name: a name of the folder that contains validation images. test_name: a name of the folder that contains test images. num_workers: number of workers Returns: ImageClassifierData
def from_paths(cls, path, bs=64, tfms=(None,None), trn_name='train', val_name='valid', test_name=None, test_with_labels=False, num_workers=8): """ Read in images and their labels given as sub-folder names Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` trn_name: a name of the folder that contains training images. val_name: a name of the folder that contains validation images. test_name: a name of the folder that contains test images. num_workers: number of workers Returns: ImageClassifierData """ assert not(tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets" trn,val = [folder_source(path, o) for o in (trn_name, val_name)] if test_name: test = folder_source(path, test_name) if test_with_labels else read_dir(path, test_name) else: test = None datasets = cls.get_ds(FilesIndexArrayDataset, trn, val, tfms, path=path, test=test) return cls(path, datasets, bs, num_workers, classes=trn[2])
Read in images and their labels given as a CSV file. This method should be used when training image labels are given in a CSV file as opposed to sub-directories with label names. Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) folder: a name of the folder in which training images are contained. csv_fname: a name of the CSV file which contains target labels. bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`. If None, default arguments to get_cv_idxs are used. suffix: suffix to add to image names in CSV file (sometimes CSV only contains the file name without file extension e.g. '.jpg' - in which case, you can set suffix as '.jpg') test_name: a name of the folder which contains test images. continuous: if True, the data set is used to train regression models. If False, it is used to train classification models. skip_header: skip the first row of the CSV file. num_workers: number of workers cat_separator: Labels category separator Returns: ImageClassifierData
def from_csv(cls, path, folder, csv_fname, bs=64, tfms=(None,None), val_idxs=None, suffix='', test_name=None, continuous=False, skip_header=True, num_workers=8, cat_separator=' '): """ Read in images and their labels given as a CSV file. This method should be used when training image labels are given in an CSV file as opposed to sub-directories with label names. Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) folder: a name of the folder in which training images are contained. csv_fname: a name of the CSV file which contains target labels. bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`. If None, default arguments to get_cv_idxs are used. suffix: suffix to add to image names in CSV file (sometimes CSV only contains the file name without file extension e.g. '.jpg' - in which case, you can set suffix as '.jpg') test_name: a name of the folder which contains test images. continuous: if True, the data set is used to train regression models. If False, it is used to train classification models. skip_header: skip the first row of the CSV file. num_workers: number of workers cat_separator: Labels category separator Returns: ImageClassifierData """ assert not (tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets" assert not (os.path.isabs(folder)), "folder needs to be a relative path" fnames,y,classes = csv_source(folder, csv_fname, skip_header, suffix, continuous=continuous, cat_separator=cat_separator) return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name, num_workers=num_workers, suffix=suffix, tfms=tfms, bs=bs, continuous=continuous)
Read in images given a sub-folder and their labels given a numpy array Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) folder: a name of the folder in which training images are contained. y: numpy array which contains target labels ordered by filenames. bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`. If None, default arguments to get_cv_idxs are used. test_name: a name of the folder which contains test images. num_workers: number of workers Returns: ImageClassifierData
def from_path_and_array(cls, path, folder, y, classes=None, val_idxs=None, test_name=None, num_workers=8, tfms=(None,None), bs=64): """ Read in images given a sub-folder and their labels given a numpy array Arguments: path: a root path of the data (used for storing trained models, precomputed values, etc) folder: a name of the folder in which training images are contained. y: numpy array which contains target labels ordered by filenames. bs: batch size tfms: transformations (for data augmentations). e.g. output of `tfms_from_model` val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`. If None, default arguments to get_cv_idxs are used. test_name: a name of the folder which contains test images. num_workers: number of workers Returns: ImageClassifierData """ assert not (tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets" assert not (os.path.isabs(folder)), "folder needs to be a relative path" fnames = np.core.defchararray.add(f'{folder}/', sorted(os.listdir(f'{path}{folder}'))) return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name, num_workers=num_workers, tfms=tfms, bs=bs)
Is the code running in the IPython environment (including Jupyter)
def is_in_ipython(): "Is the code running in the ipython environment (jupyter including)" program_name = os.path.basename(os.getenv('_', '')) if ('jupyter-notebook' in program_name or # jupyter-notebook 'ipython' in program_name or # ipython 'JPY_PARENT_PID' in os.environ): # ipython-notebook return True else: return False
Free the traceback from references to locals() in each frame, to avoid a circular reference that prevents gc.collect() from reclaiming memory
def get_ref_free_exc_info(): "Free traceback from references to locals() in each frame to avoid circular reference leading to gc.collect() unable to reclaim memory" type, val, tb = sys.exc_info() traceback.clear_frames(tb) return (type, val, tb)
Reclaim GPU RAM if CUDA out of memory happened, or execution was interrupted
def gpu_mem_restore(func): "Reclaim GPU RAM if CUDA out of memory happened, or execution was interrupted" @functools.wraps(func) def wrapper(*args, **kwargs): tb_clear_frames = os.environ.get('FASTAI_TB_CLEAR_FRAMES', None) if not IS_IN_IPYTHON or tb_clear_frames=="0": return func(*args, **kwargs) try: return func(*args, **kwargs) except Exception as e: if ("CUDA out of memory" in str(e) or "device-side assert triggered" in str(e) or tb_clear_frames == "1"): type, val, tb = get_ref_free_exc_info() # must! gc.collect() if "device-side assert triggered" in str(e): warn("""When 'device-side assert triggered' error happens, it's not possible to recover and you must restart the kernel to continue. Use os.environ['CUDA_LAUNCH_BLOCKING']="1" before restarting to debug""") raise type(val).with_traceback(tb) from None else: raise # re-raises the exact last exception return wrapper
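A hypothetical usage sketch: the decorator is meant to wrap a training entry point so that an out-of-memory error raised inside a notebook does not keep GPU memory pinned through the stored traceback (the train_one_epoch function and learn object below are illustrative, not from the source):

@gpu_mem_restore
def train_one_epoch(learn):
    # Any CUDA OOM or interrupt raised in here gets a frame-cleared traceback,
    # letting gc.collect() reclaim the GPU tensors referenced by those frames.
    learn.fit(1)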
Fits a model Arguments: model (model): any pytorch module net = to_gpu(net) data (ModelData): see ModelData class and subclasses (can be a list) opt: an optimizer. Example: optim.Adam. If n_epochs is a list, it needs to be the layer_optimizer to get the optimizer as it changes. n_epochs (int or list): number of epochs (or list of number of epochs) crit: loss function to optimize. Example: F.cross_entropy
def fit(model, data, n_epochs, opt, crit, metrics=None, callbacks=None, stepper=Stepper, swa_model=None, swa_start=None, swa_eval_freq=None, visualize=False, **kwargs): """ Fits a model Arguments: model (model): any pytorch module net = to_gpu(net) data (ModelData): see ModelData class and subclasses (can be a list) opts: an optimizer. Example: optim.Adam. If n_epochs is a list, it needs to be the layer_optimizer to get the optimizer as it changes. n_epochs(int or list): number of epochs (or list of number of epochs) crit: loss function to optimize. Example: F.cross_entropy """ seq_first = kwargs.pop('seq_first', False) all_val = kwargs.pop('all_val', False) get_ep_vals = kwargs.pop('get_ep_vals', False) validate_skip = kwargs.pop('validate_skip', 0) metrics = metrics or [] callbacks = callbacks or [] avg_mom=0.98 batch_num,avg_loss=0,0. for cb in callbacks: cb.on_train_begin() names = ["epoch", "trn_loss", "val_loss"] + [f.__name__ for f in metrics] if swa_model is not None: swa_names = ['swa_loss'] + [f'swa_{f.__name__}' for f in metrics] names += swa_names # will use this to call evaluate later swa_stepper = stepper(swa_model, None, crit, **kwargs) layout = "{!s:10} " * len(names) if not isinstance(n_epochs, Iterable): n_epochs=[n_epochs] if not isinstance(data, Iterable): data = [data] if len(data) == 1: data = data * len(n_epochs) for cb in callbacks: cb.on_phase_begin() model_stepper = stepper(model, opt.opt if hasattr(opt,'opt') else opt, crit, **kwargs) ep_vals = collections.OrderedDict() tot_epochs = int(np.ceil(np.array(n_epochs).sum())) cnt_phases = np.array([ep * len(dat.trn_dl) for (ep,dat) in zip(n_epochs,data)]).cumsum() phase = 0 for epoch in tnrange(tot_epochs, desc='Epoch'): if phase >= len(n_epochs): break #Sometimes cumulated errors make this append. 
model_stepper.reset(True) cur_data = data[phase] if hasattr(cur_data, 'trn_sampler'): cur_data.trn_sampler.set_epoch(epoch) if hasattr(cur_data, 'val_sampler'): cur_data.val_sampler.set_epoch(epoch) num_batch = len(cur_data.trn_dl) t = tqdm(iter(cur_data.trn_dl), leave=False, total=num_batch, miniters=0) if all_val: val_iter = IterBatch(cur_data.val_dl) for (*x,y) in t: batch_num += 1 for cb in callbacks: cb.on_batch_begin() loss = model_stepper.step(V(x),V(y), epoch) avg_loss = avg_loss * avg_mom + loss * (1-avg_mom) debias_loss = avg_loss / (1 - avg_mom**batch_num) t.set_postfix(loss=debias_loss, refresh=False) stop=False los = debias_loss if not all_val else [debias_loss] + validate_next(model_stepper,metrics, val_iter) for cb in callbacks: stop = stop or cb.on_batch_end(los) if stop: return if batch_num >= cnt_phases[phase]: for cb in callbacks: cb.on_phase_end() phase += 1 if phase >= len(n_epochs): t.close() break for cb in callbacks: cb.on_phase_begin() if isinstance(opt, LayerOptimizer): model_stepper.opt = opt.opt if cur_data != data[phase]: t.close() break if not all_val: vals = validate(model_stepper, cur_data.val_dl, metrics, epoch, seq_first=seq_first, validate_skip = validate_skip) stop=False for cb in callbacks: stop = stop or cb.on_epoch_end(vals) if swa_model is not None: if (epoch + 1) >= swa_start and ((epoch + 1 - swa_start) % swa_eval_freq == 0 or epoch == tot_epochs - 1): fix_batchnorm(swa_model, cur_data.trn_dl) swa_vals = validate(swa_stepper, cur_data.val_dl, metrics, epoch, validate_skip = validate_skip) vals += swa_vals if epoch > 0: print_stats(epoch, [debias_loss] + vals, visualize, prev_val) else: print(layout.format(*names)) print_stats(epoch, [debias_loss] + vals, visualize) prev_val = [debias_loss] + vals ep_vals = append_stats(ep_vals, epoch, [debias_loss] + vals) if stop: break for cb in callbacks: cb.on_train_end() if get_ep_vals: return vals, ep_vals else: return vals
Computes the loss on the next minibatch of the validation set.
def validate_next(stepper, metrics, val_iter): """Computes the loss on the next minibatch of the validation set.""" stepper.reset(False) with no_grad_context(): (*x,y) = val_iter.next() preds,l = stepper.evaluate(VV(x), VV(y)) res = [delistify(to_np(l))] res += [f(datafy(preds), datafy(y)) for f in metrics] stepper.reset(True) return res
Create link to documentation.
def link_type(arg_type, arg_name=None, include_bt:bool=True): "Create link to documentation." arg_name = arg_name or fn_name(arg_type) if include_bt: arg_name = code_esc(arg_name) if belongs_to_module(arg_type, 'torch') and ('Tensor' not in arg_name): return f'[{arg_name}]({get_pytorch_link(arg_type)})' if is_fastai_class(arg_type): return f'[{arg_name}]({get_fn_link(arg_type)})' return arg_name
Check if `t` belongs to `module_name`.
def belongs_to_module(t, module_name): "Check if `t` belongs to `module_name`." if hasattr(t, '__func__'): return belongs_to_module(t.__func__, module_name) if not inspect.getmodule(t): return False return inspect.getmodule(t).__name__.startswith(module_name)
Formats function param to `param1:Type=val`. Font weights: param1=bold, val=bold+italic
def format_param(p): "Formats function param to `param1:Type=val`. Font weights: param1=bold, val=bold+italic" arg_prefix = arg_prefixes.get(p.kind, '') # asterisk prefix for *args and **kwargs res = f"**{arg_prefix}{code_esc(p.name)}**" if hasattr(p, 'annotation') and p.annotation != p.empty: res += f':{anno_repr(p.annotation)}' if p.default != p.empty: default = getattr(p.default, 'func', p.default) default = getattr(default, '__name__', default) res += f'=***`{repr(default)}`***' return res
Format and link `func` definition to show in documentation
def format_ft_def(func, full_name:str=None)->str: "Format and link `func` definition to show in documentation" sig = inspect.signature(func) name = f'<code>{full_name or func.__name__}</code>' fmt_params = [format_param(param) for name,param in sig.parameters.items() if name not in ('self','cls')] arg_str = f"({', '.join(fmt_params)})" if sig.return_annotation and (sig.return_annotation != sig.empty): arg_str += f" → {anno_repr(sig.return_annotation)}" if is_fastai_class(type(func)): arg_str += f" :: {link_type(type(func))}" f_name = f"<code>class</code> {name}" if inspect.isclass(func) else name return f'{f_name}',f'{name}{arg_str}'
Formatted enum documentation.
def get_enum_doc(elt, full_name:str)->str: "Formatted enum documentation." vals = ', '.join(elt.__members__.keys()) return f'{code_esc(full_name)}',f'<code>Enum</code> = [{vals}]'
Class definition.
def get_cls_doc(elt, full_name:str)->str: "Class definition." parent_class = inspect.getclasstree([elt])[-1][0][1][0] name,args = format_ft_def(elt, full_name) if parent_class != object: args += f' :: {link_type(parent_class, include_bt=True)}' return name,args
Show documentation for element `elt`. Supported types: class, Callable, and enum.
def show_doc(elt, doc_string:bool=True, full_name:str=None, arg_comments:dict=None, title_level=None, alt_doc_string:str='', ignore_warn:bool=False, markdown=True, show_tests=True): "Show documentation for element `elt`. Supported types: class, Callable, and enum." arg_comments = ifnone(arg_comments, {}) anchor_id = get_anchor(elt) elt = getattr(elt, '__func__', elt) full_name = full_name or fn_name(elt) if inspect.isclass(elt): if is_enum(elt.__class__): name,args = get_enum_doc(elt, full_name) else: name,args = get_cls_doc(elt, full_name) elif isinstance(elt, Callable): name,args = format_ft_def(elt, full_name) else: raise Exception(f'doc definition not supported for {full_name}') source_link = get_function_source(elt) if is_fastai_class(elt) else "" test_link, test_modal = get_pytest_html(elt, anchor_id=anchor_id) if show_tests else ('', '') title_level = ifnone(title_level, 2 if inspect.isclass(elt) else 4) doc = f'<h{title_level} id="{anchor_id}" class="doc_header">{name}{source_link}{test_link}</h{title_level}>' doc += f'\n\n> {args}\n\n' doc += f'{test_modal}' if doc_string and (inspect.getdoc(elt) or arg_comments): doc += format_docstring(elt, arg_comments, alt_doc_string, ignore_warn) + ' ' if markdown: display(Markdown(doc)) else: return doc
Show `show_doc` info in preview window along with link to full docs.
def doc(elt): "Show `show_doc` info in preview window along with link to full docs." global use_relative_links use_relative_links = False elt = getattr(elt, '__func__', elt) md = show_doc(elt, markdown=False) if is_fastai_class(elt): md += f'\n\n<a href="{get_fn_link(elt)}" target="_blank" rel="noreferrer noopener">Show in docs</a>' output = HTMLExporter().markdown2html(md) use_relative_links = True if IS_IN_COLAB: get_ipython().run_cell_magic(u'html', u'', output) else: try: page.page({'text/html': output}) except: display(Markdown(md))
Merge and format the docstring definition with `arg_comments` and `alt_doc_string`.
def format_docstring(elt, arg_comments:dict={}, alt_doc_string:str='', ignore_warn:bool=False)->str: "Merge and format the docstring definition with `arg_comments` and `alt_doc_string`." parsed = "" doc = parse_docstring(inspect.getdoc(elt)) description = alt_doc_string or f"{doc['short_description']} {doc['long_description']}" if description: parsed += f'\n\n{link_docstring(inspect.getmodule(elt), description)}' resolved_comments = {**doc.get('comments', {}), **arg_comments} # arg_comments takes priority args = inspect.getfullargspec(elt).args if not is_enum(elt.__class__) else elt.__members__.keys() if resolved_comments: parsed += '\n' for a in resolved_comments: parsed += f'\n- *{a}*: {resolved_comments[a]}' if a not in args and not ignore_warn: warn(f'Doc arg mismatch: {a}') return_comment = arg_comments.get('return') or doc.get('return') if return_comment: parsed += f'\n\n*return*: {return_comment}' return parsed
Search `docstring` for backticks and attempt to link those functions to respective documentation.
def link_docstring(modules, docstring:str, overwrite:bool=False)->str: "Search `docstring` for backticks and attempt to link those functions to respective documentation." mods = listify(modules) for mod in mods: _modvars.update(mod.__dict__) # concat all module definitions return re.sub(BT_REGEX, replace_link, docstring)
Attempt to resolve keywords such as Learner.lr_find. `match_last` starts matching from last component.
def find_elt(modvars, keyword, match_last=False): "Attempt to resolve keywords such as Learner.lr_find. `match_last` starts matching from last component." keyword = strip_fastai(keyword) if keyword in modvars: return modvars[keyword] comps = keyword.split('.') comp_elt = modvars.get(comps[0]) if hasattr(comp_elt, '__dict__'): return find_elt(comp_elt.__dict__, '.'.join(comps[1:]), match_last=match_last)
Return module from `mod_name`.
def import_mod(mod_name:str, ignore_errors=False): "Return module from `mod_name`." splits = str.split(mod_name, '.') try: if len(splits) > 1 : mod = importlib.import_module('.' + '.'.join(splits[1:]), splits[0]) else: mod = importlib.import_module(mod_name) return mod except: if not ignore_errors: print(f"Module {mod_name} doesn't exist.")
Show documentation for `ft_name`, see `show_doc`.
def show_doc_from_name(mod_name, ft_name:str, doc_string:bool=True, arg_comments:dict={}, alt_doc_string:str=''): "Show documentation for `ft_name`, see `show_doc`." mod = import_mod(mod_name) splits = str.split(ft_name, '.') assert hasattr(mod, splits[0]), print(f"Module {mod_name} doesn't have a function named {splits[0]}.") elt = getattr(mod, splits[0]) for i,split in enumerate(splits[1:]): assert hasattr(elt, split), print(f"Class {'.'.join(splits[:i+1])} doesn't have a function named {split}.") elt = getattr(elt, split) show_doc(elt, doc_string, ft_name, arg_comments, alt_doc_string)
Return all the functions of module `mod`.
def get_ft_names(mod, include_inner=False)->List[str]: "Return all the functions of module `mod`." # If the module has an attribute __all__, it picks those. # Otherwise, it returns all the functions defined inside a module. fn_names = [] for elt_name in get_exports(mod): elt = getattr(mod,elt_name) #This removes the files imported from elsewhere try: fname = inspect.getfile(elt) except: continue if mod.__file__.endswith('__init__.py'): if inspect.ismodule(elt): fn_names.append(elt_name) else: continue else: if (fname != mod.__file__): continue if inspect.isclass(elt) or inspect.isfunction(elt): fn_names.append(elt_name) else: continue if include_inner and inspect.isclass(elt) and not is_enum(elt.__class__): fn_names.extend(get_inner_fts(elt)) return fn_names
List the inner functions of a class.
def get_inner_fts(elt)->List[str]: "List the inner functions of a class." fts = [] for ft_name in elt.__dict__.keys(): if ft_name.startswith('_'): continue ft = getattr(elt, ft_name) if inspect.isfunction(ft): fts.append(f'{elt.__name__}.{ft_name}') if inspect.ismethod(ft): fts.append(f'{elt.__name__}.{ft_name}') if inspect.isclass(ft): fts += [f'{elt.__name__}.{n}' for n in get_inner_fts(ft)] return fts
Display table of contents for given `mod_name`.
def get_module_toc(mod_name): "Display table of contents for given `mod_name`." mod = import_mod(mod_name) ft_names = mod.__all__ if hasattr(mod,'__all__') else get_ft_names(mod) ft_names.sort(key = str.lower) tabmat = '' for ft_name in ft_names: tabmat += f'- [{ft_name}](#{ft_name})\n' elt = getattr(mod, ft_name) if inspect.isclass(elt) and not is_enum(elt.__class__): in_ft_names = get_inner_fts(elt) for name in in_ft_names: tabmat += f' - [{name}](#{name})\n' display(Markdown(tabmat))
Return function link to notebook documentation of `ft`. Private functions link to source code
def get_fn_link(ft)->str: "Return function link to notebook documentation of `ft`. Private functions link to source code" ft = getattr(ft, '__func__', ft) anchor = strip_fastai(get_anchor(ft)) module_name = strip_fastai(get_module_name(ft)) base = '' if use_relative_links else FASTAI_DOCS return f'{base}/{module_name}.html#{anchor}'
Returns link to pytorch docs of `ft`.
def get_pytorch_link(ft)->str: "Returns link to pytorch docs of `ft`." name = ft.__name__ ext = '.html' if name == 'device': return f'{PYTORCH_DOCS}tensor_attributes{ext}#torch-device' if name == 'Tensor': return f'{PYTORCH_DOCS}tensors{ext}#torch-tensor' if name.startswith('torchvision'): doc_path = get_module_name(ft).replace('.', '/') if inspect.ismodule(ft): name = name.replace('.', '-') return f'{PYTORCH_DOCS}{doc_path}{ext}#{name}' if name.startswith('torch.nn') and inspect.ismodule(ft): # nn.functional is special case nn_link = name.replace('.', '-') return f'{PYTORCH_DOCS}nn{ext}#{nn_link}' paths = get_module_name(ft).split('.') if len(paths) == 1: return f'{PYTORCH_DOCS}{paths[0]}{ext}#{paths[0]}.{name}' offset = 1 if paths[1] == 'utils' else 0 # utils is a pytorch special case doc_path = paths[1+offset] if inspect.ismodule(ft): return f'{PYTORCH_DOCS}{doc_path}{ext}#module-{name}' fnlink = '.'.join(paths[:(2+offset)]+[name]) return f'{PYTORCH_DOCS}{doc_path}{ext}#{fnlink}'
Returns github link for given file
def get_source_link(file, line, display_text="[source]", **kwargs)->str: "Returns github link for given file" link = f"{SOURCE_URL}{file}#L{line}" if display_text is None: return link return f'<a href="{link}" class="source_link" style="float:right">{display_text}</a>'
Returns link to `ft` in source code.
def get_function_source(ft, **kwargs)->str: "Returns link to `ft` in source code." try: line = inspect.getsourcelines(ft)[1] except Exception: return '' mod_path = get_module_name(ft).replace('.', '/') + '.py' return get_source_link(mod_path, line, **kwargs)
Look through the cell source for comments which affect nbval's behaviour. Yields an iterable of ``(MARKER_TYPE, True)`` tuples.
def find_comment_markers(cellsource): """Look through the cell source for comments which affect nbval's behaviour Yield an iterable of ``(MARKER_TYPE, True)``. """ found = {} for line in cellsource.splitlines(): line = line.strip() if line.startswith('#'): # print("Found comment in '{}'".format(line)) comment = line.lstrip('#').strip() if comment in comment_markers: # print("Found marker {}".format(comment)) marker = comment_markers[comment] if not isinstance(marker, tuple): # If not an explicit tuple ('option', True/False), # imply ('option', True) marker = (marker, True) marker_type = marker[0] if marker_type in found: warnings.warn( "Conflicting comment markers found, using the latest: " " %s VS %s" % (found[marker_type], comment)) found[marker_type] = comment yield marker
Merge all stream outputs with shared names into single streams to ensure deterministic outputs. Parameters ---------- outputs : iterable of NotebookNodes Outputs being processed
def coalesce_streams(outputs): """ Merge all stream outputs with shared names into single streams to ensure deterministic outputs. Parameters ---------- outputs : iterable of NotebookNodes Outputs being processed """ if not outputs: return outputs new_outputs = [] streams = {} for output in outputs: if (output.output_type == 'stream'): if output.name in streams: streams[output.name].text += output.text else: new_outputs.append(output) streams[output.name] = output else: new_outputs.append(output) # process \r and \b characters for output in streams.values(): old = output.text while len(output.text) < len(old): old = output.text # Cancel out anything-but-newline followed by backspace output.text = backspace_pat.sub('', output.text) # Replace all carriage returns not followed by newline output.text = carriagereturn_pat.sub('', output.text) return new_outputs
Trim and hash base64 strings
def _trim_base64(s): """Trim and hash base64 strings""" if len(s) > 64 and _base64.match(s.replace('\n', '')): h = hash_string(s) s = '%s...<snip base64, md5=%s...>' % (s[:8], h[:16]) return s
Indent each line with indent
def _indent(s, indent='    '): """Indent each line with indent""" if isinstance(s, six.string_types): return '\n'.join(('%s%s' % (indent, line) for line in s.splitlines())) return s
Called by pytest to set up the collector cells. Here we start a kernel and set up the sanitize patterns.
def setup(self): """ Called by pytest to setup the collector cells in . Here we start a kernel and setup the sanitize patterns. """ if self.parent.config.option.current_env: kernel_name = CURRENT_ENV_KERNEL_NAME else: kernel_name = self.nb.metadata.get( 'kernelspec', {}).get('name', 'python') self.kernel = RunningKernel(kernel_name, str(self.fspath.dirname)) self.setup_sanitize_files() if getattr(self.parent.config.option, 'cov_source', None): setup_coverage(self.parent.config, self.kernel, getattr(self, "fspath", None))
For each of the sanitize files that were specified as command line options load the contents of the file into the sanitise patterns dictionary.
def setup_sanitize_files(self): """ For each of the sanitize files that were specified as command line options load the contents of the file into the sanitise patterns dictionary. """ for fname in self.get_sanitize_files(): with open(fname, 'r') as f: self.sanitize_patterns.update(get_sanitize_patterns(f.read()))
Return list of all sanitize files provided by the user on the command line. N.B.: We only support one sanitize file at the moment, but this is likely to change in the future
def get_sanitize_files(self): """ Return list of all sanitize files provided by the user on the command line. N.B.: We only support one sanitize file at the moment, but this is likely to change in the future """ if self.parent.config.option.sanitize_with is not None: return [self.parent.config.option.sanitize_with] else: return []
Gets a message from the iopub channel of the notebook kernel.
def get_kernel_message(self, timeout=None, stream='iopub'): """ Gets a message from the iopub channel of the notebook kernel. """ return self.kernel.get_message(stream, timeout=timeout)
The collect function is required by pytest and is used to yield pytest Item objects. We specify an Item for each code cell in the notebook.
def collect(self): """ The collect function is required by pytest and is used to yield pytest Item objects. We specify an Item for each code cell in the notebook. """ self.nb = nbformat.read(str(self.fspath), as_version=4) # Start the cell count cell_num = 0 # Iterate over the cells in the notebook for cell in self.nb.cells: # Skip the cells that have text, headings or related stuff # Only test code cells if cell.cell_type == 'code': # The cell may contain a comment indicating that its output # should be checked or ignored. If it doesn't, use the default # behaviour. The --nbval option checks unmarked cells. with warnings.catch_warnings(record=True) as ws: options = defaultdict(bool, find_metadata_tags(cell.metadata)) comment_opts = dict(find_comment_markers(cell.source)) if set(comment_opts.keys()) & set(options.keys()): warnings.warn( "Overlapping options from comments and metadata, " "using options from comments: %s" % str(set(comment_opts.keys()) & set(options.keys()))) for w in ws: self.parent.config.warn( "C1", str(w.message), '%s:Cell %d' % ( getattr(self, "fspath", None), cell_num)) options.update(comment_opts) options.setdefault('check', self.compare_outputs) yield IPyNbCell('Cell ' + str(cell_num), self, cell_num, cell, options) # Update 'code' cell count cell_num += 1
Format an output for printing
def format_output_compare(self, key, left, right): """Format an output for printing""" if isinstance(left, six.string_types): left = _trim_base64(left) if isinstance(right, six.string_types): right = _trim_base64(right) cc = self.colors self.comparison_traceback.append( cc.OKBLUE + " mismatch '%s'" % key + cc.FAIL) # Use comparison repr from pytest: hook_result = self.ihook.pytest_assertrepr_compare( config=self.config, op='==', left=left, right=right) for new_expl in hook_result: if new_expl: new_expl = [' %s' % line.replace("\n", "\\n") for line in new_expl] self.comparison_traceback.append("\n assert reference_output == test_output failed:\n") self.comparison_traceback.extend(new_expl) break else: # Fallback repr: self.comparison_traceback.append( " <<<<<<<<<<<< Reference output from ipynb file:" + cc.ENDC) self.comparison_traceback.append(_indent(left)) self.comparison_traceback.append( cc.FAIL + ' ============ disagrees with newly computed (test) output:' + cc.ENDC) self.comparison_traceback.append(_indent(right)) self.comparison_traceback.append( cc.FAIL + ' >>>>>>>>>>>>') self.comparison_traceback.append(cc.ENDC)
Called when self.runtest() raises an exception.
def repr_failure(self, excinfo): """ called when self.runtest() raises an exception. """ exc = excinfo.value cc = self.colors if isinstance(exc, NbCellError): msg_items = [ cc.FAIL + "Notebook cell execution failed" + cc.ENDC] formatstring = ( cc.OKBLUE + "Cell %d: %s\n\n" + "Input:\n" + cc.ENDC + "%s\n") msg_items.append(formatstring % ( exc.cell_num, str(exc), exc.source )) if exc.inner_traceback: msg_items.append(( cc.OKBLUE + "Traceback:" + cc.ENDC + "\n%s\n") % exc.inner_traceback) return "\n".join(msg_items) else: return "pytest plugin exception: %s" % str(exc)
sanitize a string for comparison.
def sanitize(self, s): """sanitize a string for comparison. """ if not isinstance(s, six.string_types): return s """ re.sub matches a regex and replaces it with another. The regex replacements are taken from a file if the option is passed when py.test is called. Otherwise, the strings are not processed """ for regex, replace in six.iteritems(self.parent.sanitize_patterns): s = re.sub(regex, replace, s) return s
Computes the outputs for several augmented inputs for TTA
def _tta_only(learn:Learner, ds_type:DatasetType=DatasetType.Valid, scale:float=1.35) -> Iterator[List[Tensor]]: "Computes the outputs for several augmented inputs for TTA" dl = learn.dl(ds_type) ds = dl.dataset old = ds.tfms augm_tfm = [o for o in learn.data.train_ds.tfms if o.tfm not in (crop_pad, flip_lr, dihedral, zoom)] try: pbar = master_bar(range(8)) for i in pbar: row = 1 if i&1 else 0 col = 1 if i&2 else 0 flip = i&4 d = {'row_pct':row, 'col_pct':col, 'is_random':False} tfm = [*augm_tfm, zoom(scale=scale, **d), crop_pad(**d)] if flip: tfm.append(flip_lr(p=1.)) ds.tfms = tfm yield get_preds(learn.model, dl, pbar=pbar, activ=_loss_func2activ(learn.loss_func))[0] finally: ds.tfms = old
Applies TTA to predict on `ds_type` dataset.
def _TTA(learn:Learner, beta:float=0.4, scale:float=1.35, ds_type:DatasetType=DatasetType.Valid, with_loss:bool=False) -> Tensors: "Applies TTA to predict on `ds_type` dataset." preds,y = learn.get_preds(ds_type) all_preds = list(learn.tta_only(scale=scale, ds_type=ds_type)) avg_preds = torch.stack(all_preds).mean(0) if beta is None: return preds,avg_preds,y else: final_preds = preds*beta + avg_preds*(1-beta) if with_loss: with NoneReduceOnCPU(learn.loss_func) as lf: loss = lf(final_preds, y) return final_preds, y, loss return final_preds, y
Computes the f_beta score between `y_pred` and `y_true`
def fbeta(y_pred:Tensor, y_true:Tensor, thresh:float=0.2, beta:float=2, eps:float=1e-9, sigmoid:bool=True)->Rank0Tensor: "Computes the f_beta between `preds` and `targets`" beta2 = beta ** 2 if sigmoid: y_pred = y_pred.sigmoid() y_pred = (y_pred>thresh).float() y_true = y_true.float() TP = (y_pred*y_true).sum(dim=1) prec = TP/(y_pred.sum(dim=1)+eps) rec = TP/(y_true.sum(dim=1)+eps) res = (prec*rec)/(prec*beta2+rec+eps)*(1+beta2) return res.mean()
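A worked example (assumes torch and the fbeta definition above; sigmoid is disabled because the inputs are already probabilities):

import torch

y_pred = torch.tensor([[0.9, 0.1, 0.8], [0.2, 0.7, 0.1]])
y_true = torch.tensor([[1., 0., 1.], [0., 1., 1.]])
print(fbeta(y_pred, y_true, thresh=0.5, sigmoid=False))  # -> tensor(0.7778) approx.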
Compute accuracy with `targs` when `input` is bs * n_classes.
def accuracy(input:Tensor, targs:Tensor)->Rank0Tensor: "Compute accuracy with `targs` when `input` is bs * n_classes." n = targs.shape[0] input = input.argmax(dim=-1).view(n,-1) targs = targs.view(n,-1) return (input==targs).float().mean()
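For example (assumes torch and the accuracy definition above), two of the three argmax predictions match the targets:

import torch

preds = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
targs = torch.tensor([1, 0, 0])
print(accuracy(preds, targs))  # -> tensor(0.6667) approx.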
Compute accuracy when `y_pred` and `y_true` are the same size.
def accuracy_thresh(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True)->Rank0Tensor: "Compute accuracy when `y_pred` and `y_true` are the same size." if sigmoid: y_pred = y_pred.sigmoid() return ((y_pred>thresh)==y_true.byte()).float().mean()
Computes the Top-k accuracy (target is in the top k predictions).
def top_k_accuracy(input:Tensor, targs:Tensor, k:int=5)->Rank0Tensor: "Computes the Top-k accuracy (target is in the top k predictions)." input = input.topk(k=k, dim=-1)[1] targs = targs.unsqueeze(dim=-1).expand_as(input) return (input == targs).max(dim=-1)[0].float().mean()
Dice coefficient metric for binary target. If iou=True, returns iou metric, classic for segmentation problems.
def dice(input:Tensor, targs:Tensor, iou:bool=False)->Rank0Tensor: "Dice coefficient metric for binary target. If iou=True, returns iou metric, classic for segmentation problems." n = targs.shape[0] input = input.argmax(dim=1).view(n,-1) targs = targs.view(n,-1) intersect = (input * targs).sum().float() union = (input+targs).sum().float() if not iou: return (2. * intersect / union if union > 0 else union.new([1.]).squeeze()) else: return intersect / (union-intersect+1.0)
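A small binary-segmentation sketch (assumes torch and the dice definition above): a batch of one 2x2 image with two class channels, where the argmax prediction agrees with the target on 3 of 4 pixels.

import torch

logits = torch.tensor([[[[0.2, 0.8], [0.6, 0.1]],    # channel 0 (background)
                        [[0.9, 0.3], [0.2, 0.7]]]])  # channel 1 (foreground)
targs = torch.tensor([[[1, 1], [0, 1]]])
print(dice(logits, targs))  # -> tensor(0.8000): 2*intersection/(|pred|+|targ|) = 2*2/(2+3)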
Exp RMSE between `pred` and `targ`.
def exp_rmspe(pred:Tensor, targ:Tensor)->Rank0Tensor: "Exp RMSE between `pred` and `targ`." pred,targ = flatten_check(pred,targ) pred, targ = torch.exp(pred), torch.exp(targ) pct_var = (targ - pred)/targ return torch.sqrt((pct_var**2).mean())
Mean absolute error between `pred` and `targ`.
def mean_absolute_error(pred:Tensor, targ:Tensor)->Rank0Tensor: "Mean absolute error between `pred` and `targ`." pred,targ = flatten_check(pred,targ) return torch.abs(targ - pred).mean()
Mean squared error between `pred` and `targ`.
def mean_squared_error(pred:Tensor, targ:Tensor)->Rank0Tensor: "Mean squared error between `pred` and `targ`." pred,targ = flatten_check(pred,targ) return F.mse_loss(pred, targ)
Root mean squared error between `pred` and `targ`.
def root_mean_squared_error(pred:Tensor, targ:Tensor)->Rank0Tensor: "Root mean squared error between `pred` and `targ`." pred,targ = flatten_check(pred,targ) return torch.sqrt(F.mse_loss(pred, targ))
Mean squared logarithmic error between `pred` and `targ`.
def mean_squared_logarithmic_error(pred:Tensor, targ:Tensor)->Rank0Tensor: "Mean squared logarithmic error between `pred` and `targ`." pred,targ = flatten_check(pred,targ) return F.mse_loss(torch.log(1 + pred), torch.log(1 + targ))
Explained variance between `pred` and `targ`.
def explained_variance(pred:Tensor, targ:Tensor)->Rank0Tensor: "Explained variance between `pred` and `targ`." pred,targ = flatten_check(pred,targ) var_pct = torch.var(targ - pred) / torch.var(targ) return 1 - var_pct
R2 score (coefficient of determination) between `pred` and `targ`.
def r2_score(pred:Tensor, targ:Tensor)->Rank0Tensor: "R2 score (coefficient of determination) between `pred` and `targ`." pred,targ = flatten_check(pred,targ) u = torch.sum((targ - pred) ** 2) d = torch.sum((targ - targ.mean()) ** 2) return 1 - u / d
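A quick check against the textbook definition (assumes torch, the r2_score definition above, and the surrounding module's flatten_check helper):

import torch

pred = torch.tensor([2.5, 0.0, 2.0, 8.0])
targ = torch.tensor([3.0, -0.5, 2.0, 7.0])
print(r2_score(pred, targ))  # -> tensor(0.9486) approx., matching sklearn.metrics.r2_score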
Uses the trapezoid method to calculate the area under the ROC curve
def auc_roc_score(input:Tensor, targ:Tensor): "Using trapezoid method to calculate the area under roc curve" fpr, tpr = roc_curve(input, targ) d = fpr[1:] - fpr[:-1] sl1, sl2 = [slice(None)], [slice(None)] sl1[-1], sl2[-1] = slice(1, None), slice(None, -1) return (d * (tpr[tuple(sl1)] + tpr[tuple(sl2)]) / 2.).sum(-1)
Returns the false positive and true positive rates
def roc_curve(input:Tensor, targ:Tensor): "Returns the false positive and true positive rates" targ = (targ == 1) desc_score_indices = torch.flip(input.argsort(-1), [-1]) input = input[desc_score_indices] targ = targ[desc_score_indices] d = input[1:] - input[:-1] distinct_value_indices = torch.nonzero(d).transpose(0,1)[0] threshold_idxs = torch.cat((distinct_value_indices, LongTensor([len(targ) - 1]).to(targ.device))) tps = torch.cumsum(targ * 1, dim=-1)[threshold_idxs] fps = (1 + threshold_idxs - tps) if tps[0] != 0 or fps[0] != 0: fps = torch.cat((LongTensor([0]), fps)) tps = torch.cat((LongTensor([0]), tps)) fpr, tpr = fps.float() / fps[-1], tps.float() / tps[-1] return fpr, tpr
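A sanity check on a tiny example (assumes torch, and that LongTensor used above resolves to torch.LongTensor as in the surrounding module; the values mirror the classic four-point illustration whose ROC AUC is 0.75):

import torch

scores = torch.tensor([0.1, 0.4, 0.35, 0.8])
labels = torch.tensor([0, 0, 1, 1])
fpr, tpr = roc_curve(scores, labels)
print(fpr.tolist(), tpr.tolist())     # -> [0.0, 0.0, 0.5, 0.5, 1.0] [0.0, 0.5, 0.5, 1.0, 1.0]
print(auc_roc_score(scores, labels))  # -> tensor(0.7500)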
Convert an iterable object into a numpy array (or a list of arrays when given multiple arguments)
def A(*a): """convert iterable object into numpy array""" return np.array(a[0]) if len(a)==1 else [np.array(o) for o in a]
Convert a numpy array into a pytorch tensor. If CUDA is available and USE_GPU=True, store the resulting tensor on the GPU.
def T(a, half=False, cuda=True): """ Convert numpy array into a pytorch tensor. if Cuda is available and USE_GPU=True, store resulting tensor in GPU. """ if not torch.is_tensor(a): a = np.array(np.ascontiguousarray(a)) if a.dtype in (np.int8, np.int16, np.int32, np.int64): a = torch.LongTensor(a.astype(np.int64)) elif a.dtype in (np.float32, np.float64): a = to_half(a) if half else torch.FloatTensor(a) else: raise NotImplementedError(a.dtype) if cuda: a = to_gpu(a) return a
equivalent to create_variable, which creates a pytorch tensor
def V_(x, requires_grad=False, volatile=False): '''equivalent to create_variable, which creates a pytorch tensor''' return create_variable(x, volatile=volatile, requires_grad=requires_grad)
Creates a single pytorch tensor or a list of them, depending on the input x.
def V(x, requires_grad=False, volatile=False): '''creates a single or a list of pytorch tensors, depending on input x. ''' return map_over(x, lambda o: V_(o, requires_grad, volatile))
Returns an np.array object given an input of np.array, list, tuple, torch variable or tensor.
def to_np(v): '''returns an np.array object given an input of np.array, list, tuple, torch variable or tensor.''' if isinstance(v, float): return np.array(v) if isinstance(v, (np.ndarray, np.generic)): return v if isinstance(v, (list,tuple)): return [to_np(o) for o in v] if isinstance(v, Variable): v=v.data if torch.cuda.is_available(): if is_half_tensor(v): v=v.float() if isinstance(v, torch.FloatTensor): v=v.float() return v.cpu().numpy()
Puts a pytorch variable on the GPU, if CUDA is available and USE_GPU is set to True.
def to_gpu(x, *args, **kwargs): '''puts pytorch variable to gpu, if cuda is available and USE_GPU is set to true. ''' return x.cuda(*args, **kwargs) if USE_GPU else x
A generator that returns sequence pieces, separated by the indexes specified in idxs.
def split_by_idxs(seq, idxs): '''A generator that returns sequence pieces, separated by the indexes specified in idxs. ''' last = 0 for idx in idxs: if not (-len(seq) <= idx < len(seq)): raise KeyError(f'Idx {idx} is out-of-bounds') yield seq[last:idx] last = idx yield seq[last:]
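For instance (using the generator above):

print(list(split_by_idxs([0, 1, 2, 3, 4], [2, 4])))  # -> [[0, 1], [2, 3], [4]]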
Splits iterable a into parts of size sz (the last part may be shorter)
def partition(a, sz): """splits iterables a in equal parts of size sz""" return [a[i:i+sz] for i in range(0, len(a), sz)]
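For example (using partition above), a list of 7 elements split into chunks of 3 leaves a shorter final chunk:

print(partition(list(range(7)), 3))  # -> [[0, 1, 2], [3, 4, 5], [6]]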
A generator that yields chunks of iterable, chunk_size at a time.
def chunk_iter(iterable, chunk_size): '''A generator that yields chunks of iterable, chunk_size at a time. ''' while True: chunk = [] try: for _ in range(chunk_size): chunk.append(next(iterable)) yield chunk except StopIteration: if chunk: yield chunk break
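Note that the argument must be an iterator (something next() can consume), hence the iter() call in this sketch:

print(list(chunk_iter(iter(range(7)), 3)))  # -> [[0, 1, 2], [3, 4, 5], [6]]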
Apply `change` in brightness of image `x`.
def _brightness(x, change:uniform): "Apply `change` in brightness of image `x`." return x.add_(scipy.special.logit(change))
Rotate image by `degrees`.
def _rotate(degrees:uniform): "Rotate image by `degrees`." angle = degrees * math.pi / 180 return [[cos(angle), -sin(angle), 0.], [sin(angle), cos(angle), 0.], [0. , 0. , 1.]]
`sw`,`sh` scale width,height - `c`,`r` focus col,row.
def _get_zoom_mat(sw:float, sh:float, c:float, r:float)->AffineMatrix: "`sw`,`sh` scale width,height - `c`,`r` focus col,row." return [[sw, 0, c], [0, sh, r], [0, 0, 1.]]
Zoom image by `scale`. `row_pct`,`col_pct` select focal point of zoom.
def _zoom(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5): "Zoom image by `scale`. `row_pct`,`col_pct` select focal point of zoom." s = 1-1/scale col_c = s * (2*col_pct - 1) row_c = s * (2*row_pct - 1) return _get_zoom_mat(1/scale, 1/scale, col_c, row_c)
Squish image by `scale`. `row_pct`,`col_pct` select focal point of zoom.
def _squish(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5): "Squish image by `scale`. `row_pct`,`col_pct` select focal point of zoom." if scale <= 1: col_c = (1-scale) * (2*col_pct - 1) return _get_zoom_mat(scale, 1, col_c, 0.) else: row_c = (1-1/scale) * (2*row_pct - 1) return _get_zoom_mat(1, 1/scale, 0., row_c)
Replace pixels by random neighbors at `magnitude`.
def _jitter(c, magnitude:uniform): "Replace pixels by random neighbors at `magnitude`." c.flow.add_((torch.rand_like(c.flow)-0.5)*magnitude*2) return c
Flip `x` horizontally.
def _flip_lr(x): "Flip `x` horizontally." #return x.flip(2) if isinstance(x, ImagePoints): x.flow.flow[...,0] *= -1 return x return tensor(np.ascontiguousarray(np.array(x)[...,::-1]))
Randomly flip `x` image based on `k`.
def _dihedral(x, k:partial(uniform_int,0,7)): "Randomly flip `x` image based on `k`." flips=[] if k&1: flips.append(1) if k&2: flips.append(2) if flips: x = torch.flip(x,flips) if k&4: x = x.transpose(1,2) return x.contiguous()
Randomly flip `x` image based on `k`.
def _dihedral_affine(k:partial(uniform_int,0,7)): "Randomly flip `x` image based on `k`." x = -1 if k&1 else 1 y = -1 if k&2 else 1 if k&4: return [[0, x, 0.], [y, 0, 0], [0, 0, 1.]] return [[x, 0, 0.], [0, y, 0], [0, 0, 1.]]
Pad `x` with `padding` pixels. `mode` fills in space ('zeros','reflection','border').
def _pad_default(x, padding:int, mode='reflection'): "Pad `x` with `padding` pixels. `mode` fills in space ('zeros','reflection','border')." mode = _pad_mode_convert[mode] return F.pad(x[None], (padding,)*4, mode=mode)[0]
Cut out `n_holes` number of square holes of size `length` in image at random locations.
def _cutout(x, n_holes:uniform_int=1, length:uniform_int=40): "Cut out `n_holes` number of square holes of size `length` in image at random locations." h,w = x.shape[1:] for n in range(n_holes): h_y = np.random.randint(0, h) h_x = np.random.randint(0, w) y1 = int(np.clip(h_y - length / 2, 0, h)) y2 = int(np.clip(h_y + length / 2, 0, h)) x1 = int(np.clip(h_x - length / 2, 0, w)) x2 = int(np.clip(h_x + length / 2, 0, w)) x[:, y1:y2, x1:x2] = 0 return x
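A rough usage sketch (assumes torch and numpy are imported and _cutout above is in scope; the raw function can be called directly with plain ints even though the annotations are fastai-style random-parameter hints):

import torch

img = torch.ones(3, 64, 64)
out = _cutout(img.clone(), n_holes=2, length=16)
print((out == 0).any())  # -> tensor(True): two 16x16 regions (clipped at the borders) were zeroed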
Randomize one of the channels of the input image
def _rgb_randomize(x, channel:int=None, thresh:float=0.3): "Randomize one of the channels of the input image" if channel is None: channel = np.random.randint(0, x.shape[0] - 1) x[channel] = torch.rand(x.shape[1:]) * np.random.uniform(0, thresh) return x
Crop `x` to `size` pixels. `row_pct`,`col_pct` select focal point of crop.
def _crop_default(x, size, row_pct:uniform=0.5, col_pct:uniform=0.5): "Crop `x` to `size` pixels. `row_pct`,`col_pct` select focal point of crop." rows,cols = tis2hw(size) row_pct,col_pct = _minus_epsilon(row_pct,col_pct) row = int((x.size(1)-rows+1) * row_pct) col = int((x.size(2)-cols+1) * col_pct) return x[:, row:row+rows, col:col+cols].contiguous()
Crop and pad tfm - `row_pct`,`col_pct` sets focal point.
def _crop_pad_default(x, size, padding_mode='reflection', row_pct:uniform = 0.5, col_pct:uniform = 0.5): "Crop and pad tfm - `row_pct`,`col_pct` sets focal point." padding_mode = _pad_mode_convert[padding_mode] size = tis2hw(size) if x.shape[1:] == torch.Size(size): return x rows,cols = size row_pct,col_pct = _minus_epsilon(row_pct,col_pct) if x.size(1)<rows or x.size(2)<cols: row_pad = max((rows-x.size(1)+1)//2, 0) col_pad = max((cols-x.size(2)+1)//2, 0) x = F.pad(x[None], (col_pad,col_pad,row_pad,row_pad), mode=padding_mode)[0] row = int((x.size(1)-rows+1)*row_pct) col = int((x.size(2)-cols+1)*col_pct) x = x[:, row:row+rows, col:col+cols] return x.contiguous()