Please provide a description of the function:def anno_parser(func): "Look at params (annotated with `Param`) in func and return an `ArgumentParser`" p = ArgumentParser(description=func.__doc__) for k,v in inspect.signature(func).parameters.items(): param = func.__annotations__.get(k, Param()) kwargs = param.kwargs if v.default != inspect.Parameter.empty: kwargs['default'] = v.default p.add_argument(f"{param.pre}{k}", **kwargs) return p
[ "Look at params (annotated with `Param`) in func and return an `ArgumentParser`" ]
Please provide a description of the function:def call_parse(func): "Decorator to create a simple CLI from `func` using `anno_parser`" name = inspect.currentframe().f_back.f_globals['__name__'] if name == "__main__": args = anno_parser(func).parse_args() func(**args.__dict__) else: return func
[ "Decorator to create a simple CLI from `func` using `anno_parser`" ]
Please provide a description of the function:def call_plac(f): "Decorator to create a simple CLI from `func` using `plac`" name = inspect.currentframe().f_back.f_globals['__name__'] if name == '__main__': import plac res = plac.call(f) if callable(res): res() else: return f
[ "Decorator to create a simple CLI from `func` using `plac`" ]
Please provide a description of the function:def numericalize_tok(tokens, max_vocab=50000, min_freq=0, unk_tok="_unk_", pad_tok="_pad_", bos_tok="_bos_", eos_tok="_eos_"): if isinstance(tokens, str): raise ValueError("Expected to receive a list of tokens. Received a string instead") if isinstance(tokens[0], list): tokens = [p for o in tokens for p in o] freq = Counter(tokens) int2tok = [o for o,c in freq.most_common(max_vocab) if c>min_freq] unk_id = 3 int2tok.insert(0, bos_tok) int2tok.insert(1, pad_tok) int2tok.insert(2, eos_tok) int2tok.insert(unk_id, unk_tok) tok2int = collections.defaultdict(lambda:unk_id, {v:k for k,v in enumerate(int2tok)}) return int2tok, tok2int
[ "Takes in text tokens and returns int2tok and tok2int converters\n\n Arguments:\n tokens(list): List of tokens. Can be a list of strings, or a list of lists of strings.\n max_vocab(int): Number of tokens to return in the vocab (sorted by frequency)\n min_freq(int): Minimum number of instances a token must be present in order to be preserved.\n unk_tok(str): Token to use when unknown tokens are encountered in the source text.\n pad_tok(str): Token to use when padding sequences.\n " ]
Please provide a description of the function:def reset(self): "If your convolutional window is greater than 1 and you save previous xs, you must reset at the beginning of each new sequence." for layer in self.layers: layer.reset() if self.bidirectional: for layer in self.layers_bwd: layer.reset()
[ "If your convolutional window is greater than 1 and you save previous xs, you must reset at the beginning of each new sequence." ]
Please provide a description of the function:def start_new_kernel(startup_timeout=60, kernel_name='python', **kwargs): logger.debug('Starting new kernel: "%s"' % kernel_name) km = KernelManager(kernel_name=kernel_name, kernel_spec_manager=NbvalKernelspecManager()) km.start_kernel(**kwargs) kc = km.client() kc.start_channels() try: kc.wait_for_ready(timeout=startup_timeout) except RuntimeError: logger.exception('Failure starting kernel "%s"', kernel_name) kc.stop_channels() km.shutdown_kernel() raise return km, kc
[ "Start a new kernel, and return its Manager and Client" ]
Please provide a description of the function:def get_kernel_spec(self, kernel_name): if kernel_name == CURRENT_ENV_KERNEL_NAME: return self.kernel_spec_class( resource_dir=ipykernel.kernelspec.RESOURCES, **ipykernel.kernelspec.get_kernel_dict()) else: return super(NbvalKernelspecManager, self).get_kernel_spec(kernel_name)
[ "Returns a :class:`KernelSpec` instance for the given kernel_name.\n\n Raises :exc:`NoSuchKernel` if the given kernel name is not found.\n " ]
Please provide a description of the function:def get_message(self, stream, timeout=None): try: if stream == 'iopub': msg = self.kc.get_iopub_msg(timeout=timeout) elif stream == 'shell': msg = self.kc.get_shell_msg(timeout=timeout) else: raise ValueError('Invalid stream specified: "%s"' % stream) except Empty: logger.debug('Kernel: Timeout waiting for message on %s', stream) raise logger.debug("Kernel message (%s):\n%s", stream, pformat(msg)) return msg
[ "\n Function is used to get a message from the iopub channel.\n Timeout is None by default\n When timeout is reached\n " ]
Please provide a description of the function:def execute_cell_input(self, cell_input, allow_stdin=None): if cell_input: logger.debug('Executing cell: "%s"...', cell_input.splitlines()[0][:40]) else: logger.debug('Executing empty cell') return self.kc.execute(cell_input, allow_stdin=allow_stdin, stop_on_error=False)
[ "\n Executes a string of python code in cell input.\n We do not allow the kernel to make requests to the stdin\n this is the norm for notebooks\n\n Function returns a unique message id of the reply from\n the kernel.\n " ]
Please provide a description of the function:def await_reply(self, msg_id, timeout=None): while True: msg = self.get_message(stream='shell', timeout=timeout) # Is this the message we are waiting for? if msg['parent_header'].get('msg_id') == msg_id: if msg['content']['status'] == 'aborted': # This should not occur! raise RuntimeError('Kernel aborted execution request') return
[ "\n Continuously poll the kernel 'shell' stream for messages until:\n - It receives an 'execute_reply' status for the given message id\n - The timeout is reached awaiting a message, in which case\n a `Queue.Empty` exception will be raised.\n " ]
Please provide a description of the function:def await_idle(self, parent_id, timeout): while True: # Get a message from the kernel iopub channel msg = self.get_message(timeout=timeout, stream='iopub') # raises Empty on timeout! if msg['parent_header'].get('msg_id') != parent_id: continue if msg['msg_type'] == 'status': if msg['content']['execution_state'] == 'idle': break
[ "Poll the iopub stream until an idle message is received for the given parent ID" ]
Please provide a description of the function:def stop(self): logger.debug('Stopping kernel') self.kc.stop_channels() self.km.shutdown_kernel(now=True) del self.km
[ "\n Instructs the kernel process to stop channels\n and the kernel manager to then shutdown the process.\n " ]
Please provide a description of the function:def get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42): np.random.seed(seed) n_val = int(val_pct*n) idx_start = cv_idx*n_val idxs = np.random.permutation(n) return idxs[idx_start:idx_start+n_val]
[ " Get a list of index values for Validation set from a dataset\n \n Arguments:\n n : int, Total number of elements in the data set.\n cv_idx : int, starting index [idx_start = cv_idx*int(val_pct*n)] \n val_pct : (int, float), validation set percentage \n seed : seed value for RandomState\n \n Returns:\n list of indexes \n " ]
Please provide a description of the function:def resize_img(fname, targ, path, new_path, fn=None): if fn is None: fn = resize_fn(targ) dest = os.path.join(path_for(path, new_path, targ), fname) if os.path.exists(dest): return im = Image.open(os.path.join(path, fname)).convert('RGB') os.makedirs(os.path.split(dest)[0], exist_ok=True) fn(im).save(dest)
[ "\n Enlarge or shrink a single image to scale, such that the smaller of the height or width dimension is equal to targ.\n " ]
Please provide a description of the function:def resize_imgs(fnames, targ, path, new_path, resume=True, fn=None): target_path = path_for(path, new_path, targ) if resume: subdirs = {os.path.dirname(p) for p in fnames} subdirs = {s for s in subdirs if os.path.exists(os.path.join(target_path, s))} already_resized_fnames = set() for subdir in subdirs: files = [os.path.join(subdir, file) for file in os.listdir(os.path.join(target_path, subdir))] already_resized_fnames.update(set(files)) original_fnames = set(fnames) fnames = list(original_fnames - already_resized_fnames) errors = {} def safely_process(fname): try: resize_img(fname, targ, path, new_path, fn=fn) except Exception as ex: errors[fname] = str(ex) if len(fnames) > 0: with ThreadPoolExecutor(num_cpus()) as e: ims = e.map(lambda fname: safely_process(fname), fnames) for _ in tqdm(ims, total=len(fnames), leave=False): pass if errors: print('Some images failed to process:') print(json.dumps(errors, indent=2)) return os.path.join(path,new_path,str(targ))
[ "\n Enlarge or shrink a set of images in the same directory to scale, such that the smaller of the height or width dimension is equal to targ.\n Note: \n -- This function is multithreaded for efficiency. \n -- When destination file or folder already exist, function exists without raising an error. \n " ]
Please provide a description of the function:def read_dir(path, folder): full_path = os.path.join(path, folder) fnames = glob(f"{full_path}/*.*") directories = glob(f"{full_path}/*/") if any(fnames): return [os.path.relpath(f,path) for f in fnames] elif any(directories): raise FileNotFoundError("{} has subdirectories but contains no files. Is your directory structure correct?".format(full_path)) else: raise FileNotFoundError("{} folder doesn't exist or is empty".format(full_path))
[ " Returns a list of relative file paths to `path` for all files within `folder` " ]
Please provide a description of the function:def n_hot(ids, c): ''' one hot encoding by index. Returns array of length c, where all entries are 0, except for the indices in ids ''' res = np.zeros((c,), dtype=np.float32) res[ids] = 1 return res
[ "One-hot encoding by index: returns an array of length c where all entries are 0, except for the indices in ids." ]
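A tiny illustration of n_hot, assuming numpy and the function above are in scope:

import numpy as np  # n_hot returns a float32 numpy array

# Multi-label target: classes 1 and 3 are active out of 5 possible classes.
y = n_hot([1, 3], 5)
print(y)  # [0. 1. 0. 1. 0.]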
Please provide a description of the function:def folder_source(path, folder): fnames, lbls, all_lbls = read_dirs(path, folder) lbl2idx = {lbl:idx for idx,lbl in enumerate(all_lbls)} idxs = [lbl2idx[lbl] for lbl in lbls] lbl_arr = np.array(idxs, dtype=int) return fnames, lbl_arr, all_lbls
[ "\n Returns the filenames and labels for a folder within a path\n \n Returns:\n -------\n fnames: a list of the filenames within `folder`\n all_lbls: a list of all of the labels in `folder`, where the # of labels is determined by the # of directories within `folder`\n lbl_arr: a numpy array of the label indices in `all_lbls`\n " ]
Please provide a description of the function:def parse_csv_labels(fn, skip_header=True, cat_separator = ' '): df = pd.read_csv(fn, index_col=0, header=0 if skip_header else None, dtype=str) fnames = df.index.values df.iloc[:,0] = df.iloc[:,0].str.split(cat_separator) return fnames, list(df.to_dict().values())[0]
[ "Parse filenames and label sets from a CSV file.\n\n This method expects that the csv file at path :fn: has two columns. If it\n has a header, :skip_header: should be set to True. The labels in the\n label set are expected to be space separated.\n\n Arguments:\n fn: Path to a CSV file.\n skip_header: A boolean flag indicating whether to skip the header.\n\n Returns:\n a two-tuple of (\n image filenames,\n a dictionary of filenames and corresponding labels\n )\n .\n :param cat_separator: the separator for the categories column\n " ]
Please provide a description of the function:def isdicom(fn): '''True if the fn points to a DICOM image''' fn = str(fn) if fn.endswith('.dcm'): return True # Dicom signature from the dicom spec. with open(fn,'rb') as fh: fh.seek(0x80) return fh.read(4)==b'DICM'
[ "True if fn points to a DICOM image (by the .dcm extension or the DICM signature from the DICOM spec)." ]
Please provide a description of the function:def open_image(fn): flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR if not os.path.exists(fn) and not str(fn).startswith("http"): raise OSError('No such file or directory: {}'.format(fn)) elif os.path.isdir(fn) and not str(fn).startswith("http"): raise OSError('Is a directory: {}'.format(fn)) elif isdicom(fn): slice = pydicom.read_file(fn) if slice.PhotometricInterpretation.startswith('MONOCHROME'): # Make a fake RGB image im = np.stack([slice.pixel_array]*3,-1) return im / ((1 << slice.BitsStored)-1) else: # No support for RGB yet, as it involves various color spaces. # It shouldn't be too difficult to add though, if needed. raise OSError('Unsupported DICOM image with PhotometricInterpretation=={}'.format(slice.PhotometricInterpretation)) else: #res = np.array(Image.open(fn), dtype=np.float32)/255 #if len(res.shape)==2: res = np.repeat(res[...,None],3,2) #return res try: if str(fn).startswith("http"): req = urllib.urlopen(str(fn)) image = np.asarray(bytearray(req.read()), dtype="uint8") im = cv2.imdecode(image, flags).astype(np.float32)/255 else: im = cv2.imread(str(fn), flags).astype(np.float32)/255 if im is None: raise OSError(f'File not recognized by opencv: {fn}') return cv2.cvtColor(im, cv2.COLOR_BGR2RGB) except Exception as e: raise OSError('Error handling image at: {}'.format(fn)) from e
[ " Opens an image using OpenCV given the file path.\n\n Arguments:\n fn: the file path of the image\n\n Returns:\n The image in RGB format as numpy array of floats normalized to range between 0.0 - 1.0\n " ]
Please provide a description of the function:def split_by_idx(idxs, *a): mask = np.zeros(len(a[0]),dtype=bool) mask[np.array(idxs)] = True return [(o[mask],o[~mask]) for o in a]
[ "\n Split each array passed as *a, to a pair of arrays like this (elements selected by idxs, the remaining elements)\n This can be used to split multiple arrays containing training data to validation and training set.\n\n :param idxs [int]: list of indexes selected\n :param a list: list of np.array, each array should have same amount of elements in the first dimension\n :return: list of tuples, each containing a split of corresponding array from *a.\n First element of each tuple is an array composed from elements selected by idxs,\n second element is an array of remaining elements.\n " ]
Please provide a description of the function:def resize_imgs(self, targ, new_path, resume=True, fn=None): dest = resize_imgs(self.fnames, targ, self.path, new_path, resume, fn) return self.__class__(self.fnames, self.y, self.transform, dest)
[ "\n resize all images in the dataset and save them to `new_path`\n \n Arguments:\n targ (int): the target size\n new_path (string): the new folder to save the images\n resume (bool): if true (default), allow resuming a partial resize operation by checking for the existence\n of individual images rather than the existence of the directory\n fn (function): custom resizing function Img -> Img\n " ]
Please provide a description of the function:def denorm(self,arr): if type(arr) is not np.ndarray: arr = to_np(arr) if len(arr.shape)==3: arr = arr[None] return self.transform.denorm(np.rollaxis(arr,1,4))
[ "Reverse the normalization done to a batch of images.\n\n Arguments:\n arr: of shape/size (N,3,sz,sz)\n " ]
Please provide a description of the function:def resized(self, dl, targ, new_path, resume = True, fn=None): return dl.dataset.resize_imgs(targ, new_path, resume=resume, fn=fn) if dl else None
[ "\n Return a copy of this dataset resized\n " ]
Please provide a description of the function:def resize(self, targ_sz, new_path='tmp', resume=True, fn=None): new_ds = [] dls = [self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl] if self.test_dl: dls += [self.test_dl, self.test_aug_dl] else: dls += [None,None] t = tqdm_notebook(dls) for dl in t: new_ds.append(self.resized(dl, targ_sz, new_path, resume, fn)) t.close() return self.__class__(new_ds[0].path, new_ds, self.bs, self.num_workers, self.classes)
[ "\n Resizes all the images in the train, valid, test folders to a given size.\n\n Arguments:\n targ_sz (int): the target size\n new_path (str): the path to save the resized images (default tmp)\n resume (bool): if True, check for images in the DataSet that haven't been resized yet (useful if a previous resize\n operation was aborted)\n fn (function): optional custom resizing function\n " ]
Please provide a description of the function:def from_arrays(cls, path, trn, val, bs=64, tfms=(None,None), classes=None, num_workers=4, test=None, continuous=False): f = ArraysIndexRegressionDataset if continuous else ArraysIndexDataset datasets = cls.get_ds(f, trn, val, tfms, test=test) return cls(path, datasets, bs, num_workers, classes=classes)
[ " Read in images and their labels given as numpy arrays\n\n Arguments:\n path: a root path of the data (used for storing trained models, precomputed values, etc)\n trn: a tuple of training data matrix and target label/classification array (e.g. `trn=(x,y)` where `x` has the\n shape of `(5000, 784)` and `y` has the shape of `(5000,)`)\n val: a tuple of validation data matrix and target label/classification array.\n bs: batch size\n tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`\n classes: a list of all labels/classifications\n num_workers: a number of workers\n test: a matrix of test data (the shape should match `trn[0]`)\n\n Returns:\n ImageClassifierData\n " ]
Please provide a description of the function:def from_paths(cls, path, bs=64, tfms=(None,None), trn_name='train', val_name='valid', test_name=None, test_with_labels=False, num_workers=8): assert not(tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets" trn,val = [folder_source(path, o) for o in (trn_name, val_name)] if test_name: test = folder_source(path, test_name) if test_with_labels else read_dir(path, test_name) else: test = None datasets = cls.get_ds(FilesIndexArrayDataset, trn, val, tfms, path=path, test=test) return cls(path, datasets, bs, num_workers, classes=trn[2])
[ " Read in images and their labels given as sub-folder names\n\n Arguments:\n path: a root path of the data (used for storing trained models, precomputed values, etc)\n bs: batch size\n tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`\n trn_name: a name of the folder that contains training images.\n val_name: a name of the folder that contains validation images.\n test_name: a name of the folder that contains test images.\n num_workers: number of workers\n\n Returns:\n ImageClassifierData\n " ]
Please provide a description of the function:def from_csv(cls, path, folder, csv_fname, bs=64, tfms=(None,None), val_idxs=None, suffix='', test_name=None, continuous=False, skip_header=True, num_workers=8, cat_separator=' '): assert not (tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets" assert not (os.path.isabs(folder)), "folder needs to be a relative path" fnames,y,classes = csv_source(folder, csv_fname, skip_header, suffix, continuous=continuous, cat_separator=cat_separator) return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name, num_workers=num_workers, suffix=suffix, tfms=tfms, bs=bs, continuous=continuous)
[ " Read in images and their labels given as a CSV file.\n\n This method should be used when training image labels are given in an CSV file as opposed to\n sub-directories with label names.\n\n Arguments:\n path: a root path of the data (used for storing trained models, precomputed values, etc)\n folder: a name of the folder in which training images are contained.\n csv_fname: a name of the CSV file which contains target labels.\n bs: batch size\n tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`\n val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`.\n If None, default arguments to get_cv_idxs are used.\n suffix: suffix to add to image names in CSV file (sometimes CSV only contains the file name without file\n extension e.g. '.jpg' - in which case, you can set suffix as '.jpg')\n test_name: a name of the folder which contains test images.\n continuous: if True, the data set is used to train regression models. If False, it is used \n to train classification models.\n skip_header: skip the first row of the CSV file.\n num_workers: number of workers\n cat_separator: Labels category separator\n\n Returns:\n ImageClassifierData\n " ]
Please provide a description of the function:def from_path_and_array(cls, path, folder, y, classes=None, val_idxs=None, test_name=None, num_workers=8, tfms=(None,None), bs=64): assert not (tfms[0] is None or tfms[1] is None), "please provide transformations for your train and validation sets" assert not (os.path.isabs(folder)), "folder needs to be a relative path" fnames = np.core.defchararray.add(f'{folder}/', sorted(os.listdir(f'{path}{folder}'))) return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name, num_workers=num_workers, tfms=tfms, bs=bs)
[ " Read in images given a sub-folder and their labels given a numpy array\n\n Arguments:\n path: a root path of the data (used for storing trained models, precomputed values, etc)\n folder: a name of the folder in which training images are contained.\n y: numpy array which contains target labels ordered by filenames.\n bs: batch size\n tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`\n val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`.\n If None, default arguments to get_cv_idxs are used.\n test_name: a name of the folder which contains test images.\n num_workers: number of workers\n\n Returns:\n ImageClassifierData\n " ]
Please provide a description of the function:def is_in_ipython(): "Is the code running in the ipython environment (jupyter including)" program_name = os.path.basename(os.getenv('_', '')) if ('jupyter-notebook' in program_name or # jupyter-notebook 'ipython' in program_name or # ipython 'JPY_PARENT_PID' in os.environ): # ipython-notebook return True else: return False
[ "Is the code running in the ipython environment (jupyter including)" ]
Please provide a description of the function:def get_ref_free_exc_info(): "Free traceback from references to locals() in each frame to avoid circular reference leading to gc.collect() unable to reclaim memory" type, val, tb = sys.exc_info() traceback.clear_frames(tb) return (type, val, tb)
[ "Free traceback from references to locals() in each frame to avoid circular reference leading to gc.collect() unable to reclaim memory" ]
Please provide a description of the function:def gpu_mem_restore(func): "Reclaim GPU RAM if CUDA out of memory happened, or execution was interrupted" @functools.wraps(func) def wrapper(*args, **kwargs): tb_clear_frames = os.environ.get('FASTAI_TB_CLEAR_FRAMES', None) if not IS_IN_IPYTHON or tb_clear_frames=="0": return func(*args, **kwargs) try: return func(*args, **kwargs) except Exception as e: if ("CUDA out of memory" in str(e) or "device-side assert triggered" in str(e) or tb_clear_frames == "1"): type, val, tb = get_ref_free_exc_info() # must! gc.collect() if "device-side assert triggered" in str(e): warn("""When 'device-side assert triggered' error happens, it's not possible to recover and you must restart the kernel to continue. Use os.environ['CUDA_LAUNCH_BLOCKING']="1" before restarting to debug""") raise type(val).with_traceback(tb) from None else: raise # re-raises the exact last exception return wrapper
[ "When 'device-side assert triggered' error happens, it's not possible to recover and you must restart the kernel to continue. Use os.environ['CUDA_LAUNCH_BLOCKING']=\"1\" before restarting to debug" ]
Please provide a description of the function:def fit(model, data, n_epochs, opt, crit, metrics=None, callbacks=None, stepper=Stepper, swa_model=None, swa_start=None, swa_eval_freq=None, visualize=False, **kwargs): seq_first = kwargs.pop('seq_first', False) all_val = kwargs.pop('all_val', False) get_ep_vals = kwargs.pop('get_ep_vals', False) validate_skip = kwargs.pop('validate_skip', 0) metrics = metrics or [] callbacks = callbacks or [] avg_mom=0.98 batch_num,avg_loss=0,0. for cb in callbacks: cb.on_train_begin() names = ["epoch", "trn_loss", "val_loss"] + [f.__name__ for f in metrics] if swa_model is not None: swa_names = ['swa_loss'] + [f'swa_{f.__name__}' for f in metrics] names += swa_names # will use this to call evaluate later swa_stepper = stepper(swa_model, None, crit, **kwargs) layout = "{!s:10} " * len(names) if not isinstance(n_epochs, Iterable): n_epochs=[n_epochs] if not isinstance(data, Iterable): data = [data] if len(data) == 1: data = data * len(n_epochs) for cb in callbacks: cb.on_phase_begin() model_stepper = stepper(model, opt.opt if hasattr(opt,'opt') else opt, crit, **kwargs) ep_vals = collections.OrderedDict() tot_epochs = int(np.ceil(np.array(n_epochs).sum())) cnt_phases = np.array([ep * len(dat.trn_dl) for (ep,dat) in zip(n_epochs,data)]).cumsum() phase = 0 for epoch in tnrange(tot_epochs, desc='Epoch'): if phase >= len(n_epochs): break #Sometimes cumulated errors make this append. model_stepper.reset(True) cur_data = data[phase] if hasattr(cur_data, 'trn_sampler'): cur_data.trn_sampler.set_epoch(epoch) if hasattr(cur_data, 'val_sampler'): cur_data.val_sampler.set_epoch(epoch) num_batch = len(cur_data.trn_dl) t = tqdm(iter(cur_data.trn_dl), leave=False, total=num_batch, miniters=0) if all_val: val_iter = IterBatch(cur_data.val_dl) for (*x,y) in t: batch_num += 1 for cb in callbacks: cb.on_batch_begin() loss = model_stepper.step(V(x),V(y), epoch) avg_loss = avg_loss * avg_mom + loss * (1-avg_mom) debias_loss = avg_loss / (1 - avg_mom**batch_num) t.set_postfix(loss=debias_loss, refresh=False) stop=False los = debias_loss if not all_val else [debias_loss] + validate_next(model_stepper,metrics, val_iter) for cb in callbacks: stop = stop or cb.on_batch_end(los) if stop: return if batch_num >= cnt_phases[phase]: for cb in callbacks: cb.on_phase_end() phase += 1 if phase >= len(n_epochs): t.close() break for cb in callbacks: cb.on_phase_begin() if isinstance(opt, LayerOptimizer): model_stepper.opt = opt.opt if cur_data != data[phase]: t.close() break if not all_val: vals = validate(model_stepper, cur_data.val_dl, metrics, epoch, seq_first=seq_first, validate_skip = validate_skip) stop=False for cb in callbacks: stop = stop or cb.on_epoch_end(vals) if swa_model is not None: if (epoch + 1) >= swa_start and ((epoch + 1 - swa_start) % swa_eval_freq == 0 or epoch == tot_epochs - 1): fix_batchnorm(swa_model, cur_data.trn_dl) swa_vals = validate(swa_stepper, cur_data.val_dl, metrics, epoch, validate_skip = validate_skip) vals += swa_vals if epoch > 0: print_stats(epoch, [debias_loss] + vals, visualize, prev_val) else: print(layout.format(*names)) print_stats(epoch, [debias_loss] + vals, visualize) prev_val = [debias_loss] + vals ep_vals = append_stats(ep_vals, epoch, [debias_loss] + vals) if stop: break for cb in callbacks: cb.on_train_end() if get_ep_vals: return vals, ep_vals else: return vals
[ " Fits a model\n\n Arguments:\n model (model): any pytorch module\n net = to_gpu(net)\n data (ModelData): see ModelData class and subclasses (can be a list)\n opts: an optimizer. Example: optim.Adam. \n If n_epochs is a list, it needs to be the layer_optimizer to get the optimizer as it changes.\n n_epochs(int or list): number of epochs (or list of number of epochs)\n crit: loss function to optimize. Example: F.cross_entropy\n " ]
Please provide a description of the function:def validate_next(stepper, metrics, val_iter): stepper.reset(False) with no_grad_context(): (*x,y) = val_iter.next() preds,l = stepper.evaluate(VV(x), VV(y)) res = [delistify(to_np(l))] res += [f(datafy(preds), datafy(y)) for f in metrics] stepper.reset(True) return res
[ "Computes the loss on the next minibatch of the validation set." ]
Please provide a description of the function:def link_type(arg_type, arg_name=None, include_bt:bool=True): "Create link to documentation." arg_name = arg_name or fn_name(arg_type) if include_bt: arg_name = code_esc(arg_name) if belongs_to_module(arg_type, 'torch') and ('Tensor' not in arg_name): return f'[{arg_name}]({get_pytorch_link(arg_type)})' if is_fastai_class(arg_type): return f'[{arg_name}]({get_fn_link(arg_type)})' return arg_name
[ "Create link to documentation." ]
Please provide a description of the function:def belongs_to_module(t, module_name): "Check if `t` belongs to `module_name`." if hasattr(t, '__func__'): return belongs_to_module(t.__func__, module_name) if not inspect.getmodule(t): return False return inspect.getmodule(t).__name__.startswith(module_name)
[ "Check if `t` belongs to `module_name`." ]
Please provide a description of the function:def format_param(p): "Formats function param to `param1:Type=val`. Font weights: param1=bold, val=bold+italic" arg_prefix = arg_prefixes.get(p.kind, '') # asterisk prefix for *args and **kwargs res = f"**{arg_prefix}{code_esc(p.name)}**" if hasattr(p, 'annotation') and p.annotation != p.empty: res += f':{anno_repr(p.annotation)}' if p.default != p.empty: default = getattr(p.default, 'func', p.default) default = getattr(default, '__name__', default) res += f'=***`{repr(default)}`***' return res
[ "Formats function param to `param1:Type=val`. Font weights: param1=bold, val=bold+italic" ]
Please provide a description of the function:def format_ft_def(func, full_name:str=None)->str: "Format and link `func` definition to show in documentation" sig = inspect.signature(func) name = f'<code>{full_name or func.__name__}</code>' fmt_params = [format_param(param) for name,param in sig.parameters.items() if name not in ('self','cls')] arg_str = f"({', '.join(fmt_params)})" if sig.return_annotation and (sig.return_annotation != sig.empty): arg_str += f" → {anno_repr(sig.return_annotation)}" if is_fastai_class(type(func)): arg_str += f" :: {link_type(type(func))}" f_name = f"<code>class</code> {name}" if inspect.isclass(func) else name return f'{f_name}',f'{name}{arg_str}'
[ "Format and link `func` definition to show in documentation" ]
Please provide a description of the function:def get_enum_doc(elt, full_name:str)->str: "Formatted enum documentation." vals = ', '.join(elt.__members__.keys()) return f'{code_esc(full_name)}',f'<code>Enum</code> = [{vals}]'
[ "Formatted enum documentation." ]
Please provide a description of the function:def get_cls_doc(elt, full_name:str)->str: "Class definition." parent_class = inspect.getclasstree([elt])[-1][0][1][0] name,args = format_ft_def(elt, full_name) if parent_class != object: args += f' :: {link_type(parent_class, include_bt=True)}' return name,args
[ "Formatted class definition, including a link to the parent class if there is one." ]
Please provide a description of the function:def show_doc(elt, doc_string:bool=True, full_name:str=None, arg_comments:dict=None, title_level=None, alt_doc_string:str='', ignore_warn:bool=False, markdown=True, show_tests=True): "Show documentation for element `elt`. Supported types: class, Callable, and enum." arg_comments = ifnone(arg_comments, {}) anchor_id = get_anchor(elt) elt = getattr(elt, '__func__', elt) full_name = full_name or fn_name(elt) if inspect.isclass(elt): if is_enum(elt.__class__): name,args = get_enum_doc(elt, full_name) else: name,args = get_cls_doc(elt, full_name) elif isinstance(elt, Callable): name,args = format_ft_def(elt, full_name) else: raise Exception(f'doc definition not supported for {full_name}') source_link = get_function_source(elt) if is_fastai_class(elt) else "" test_link, test_modal = get_pytest_html(elt, anchor_id=anchor_id) if show_tests else ('', '') title_level = ifnone(title_level, 2 if inspect.isclass(elt) else 4) doc = f'<h{title_level} id="{anchor_id}" class="doc_header">{name}{source_link}{test_link}</h{title_level}>' doc += f'\n\n> {args}\n\n' doc += f'{test_modal}' if doc_string and (inspect.getdoc(elt) or arg_comments): doc += format_docstring(elt, arg_comments, alt_doc_string, ignore_warn) + ' ' if markdown: display(Markdown(doc)) else: return doc
[ "Show documentation for element `elt`. Supported types: class, Callable, and enum." ]
Please provide a description of the function:def doc(elt): "Show `show_doc` info in preview window along with link to full docs." global use_relative_links use_relative_links = False elt = getattr(elt, '__func__', elt) md = show_doc(elt, markdown=False) if is_fastai_class(elt): md += f'\n\n<a href="{get_fn_link(elt)}" target="_blank" rel="noreferrer noopener">Show in docs</a>' output = HTMLExporter().markdown2html(md) use_relative_links = True if IS_IN_COLAB: get_ipython().run_cell_magic(u'html', u'', output) else: try: page.page({'text/html': output}) except: display(Markdown(md))
[ "Show `show_doc` info in preview window along with link to full docs." ]
Please provide a description of the function:def format_docstring(elt, arg_comments:dict={}, alt_doc_string:str='', ignore_warn:bool=False)->str: "Merge and format the docstring definition with `arg_comments` and `alt_doc_string`." parsed = "" doc = parse_docstring(inspect.getdoc(elt)) description = alt_doc_string or f"{doc['short_description']} {doc['long_description']}" if description: parsed += f'\n\n{link_docstring(inspect.getmodule(elt), description)}' resolved_comments = {**doc.get('comments', {}), **arg_comments} # arg_comments takes priority args = inspect.getfullargspec(elt).args if not is_enum(elt.__class__) else elt.__members__.keys() if resolved_comments: parsed += '\n' for a in resolved_comments: parsed += f'\n- *{a}*: {resolved_comments[a]}' if a not in args and not ignore_warn: warn(f'Doc arg mismatch: {a}') return_comment = arg_comments.get('return') or doc.get('return') if return_comment: parsed += f'\n\n*return*: {return_comment}' return parsed
[ "Merge and format the docstring definition with `arg_comments` and `alt_doc_string`." ]
Please provide a description of the function:def link_docstring(modules, docstring:str, overwrite:bool=False)->str: "Search `docstring` for backticks and attempt to link those functions to respective documentation." mods = listify(modules) for mod in mods: _modvars.update(mod.__dict__) # concat all module definitions return re.sub(BT_REGEX, replace_link, docstring)
[ "Search `docstring` for backticks and attempt to link those functions to respective documentation." ]
Please provide a description of the function:def find_elt(modvars, keyword, match_last=False): "Attempt to resolve keywords such as Learner.lr_find. `match_last` starts matching from last component." keyword = strip_fastai(keyword) if keyword in modvars: return modvars[keyword] comps = keyword.split('.') comp_elt = modvars.get(comps[0]) if hasattr(comp_elt, '__dict__'): return find_elt(comp_elt.__dict__, '.'.join(comps[1:]), match_last=match_last)
[ "Attempt to resolve keywords such as Learner.lr_find. `match_last` starts matching from last component." ]
Please provide a description of the function:def import_mod(mod_name:str, ignore_errors=False): "Return module from `mod_name`." splits = str.split(mod_name, '.') try: if len(splits) > 1 : mod = importlib.import_module('.' + '.'.join(splits[1:]), splits[0]) else: mod = importlib.import_module(mod_name) return mod except: if not ignore_errors: print(f"Module {mod_name} doesn't exist.")
[ "Return module from `mod_name`." ]
Please provide a description of the function:def show_doc_from_name(mod_name, ft_name:str, doc_string:bool=True, arg_comments:dict={}, alt_doc_string:str=''): "Show documentation for `ft_name`, see `show_doc`." mod = import_mod(mod_name) splits = str.split(ft_name, '.') assert hasattr(mod, splits[0]), print(f"Module {mod_name} doesn't have a function named {splits[0]}.") elt = getattr(mod, splits[0]) for i,split in enumerate(splits[1:]): assert hasattr(elt, split), print(f"Class {'.'.join(splits[:i+1])} doesn't have a function named {split}.") elt = getattr(elt, split) show_doc(elt, doc_string, ft_name, arg_comments, alt_doc_string)
[ "Show documentation for `ft_name`, see `show_doc`." ]
Please provide a description of the function:def get_ft_names(mod, include_inner=False)->List[str]: "Return all the functions of module `mod`." # If the module has an attribute __all__, it picks those. # Otherwise, it returns all the functions defined inside a module. fn_names = [] for elt_name in get_exports(mod): elt = getattr(mod,elt_name) #This removes the files imported from elsewhere try: fname = inspect.getfile(elt) except: continue if mod.__file__.endswith('__init__.py'): if inspect.ismodule(elt): fn_names.append(elt_name) else: continue else: if (fname != mod.__file__): continue if inspect.isclass(elt) or inspect.isfunction(elt): fn_names.append(elt_name) else: continue if include_inner and inspect.isclass(elt) and not is_enum(elt.__class__): fn_names.extend(get_inner_fts(elt)) return fn_names
[ "Return all the functions of module `mod`." ]
Please provide a description of the function:def get_inner_fts(elt)->List[str]: "List the inner functions of a class." fts = [] for ft_name in elt.__dict__.keys(): if ft_name.startswith('_'): continue ft = getattr(elt, ft_name) if inspect.isfunction(ft): fts.append(f'{elt.__name__}.{ft_name}') if inspect.ismethod(ft): fts.append(f'{elt.__name__}.{ft_name}') if inspect.isclass(ft): fts += [f'{elt.__name__}.{n}' for n in get_inner_fts(ft)] return fts
[ "List the inner functions of a class." ]
Please provide a description of the function:def get_module_toc(mod_name): "Display table of contents for given `mod_name`." mod = import_mod(mod_name) ft_names = mod.__all__ if hasattr(mod,'__all__') else get_ft_names(mod) ft_names.sort(key = str.lower) tabmat = '' for ft_name in ft_names: tabmat += f'- [{ft_name}](#{ft_name})\n' elt = getattr(mod, ft_name) if inspect.isclass(elt) and not is_enum(elt.__class__): in_ft_names = get_inner_fts(elt) for name in in_ft_names: tabmat += f' - [{name}](#{name})\n' display(Markdown(tabmat))
[ "Display table of contents for given `mod_name`." ]
Please provide a description of the function:def get_fn_link(ft)->str: "Return function link to notebook documentation of `ft`. Private functions link to source code" ft = getattr(ft, '__func__', ft) anchor = strip_fastai(get_anchor(ft)) module_name = strip_fastai(get_module_name(ft)) base = '' if use_relative_links else FASTAI_DOCS return f'{base}/{module_name}.html#{anchor}'
[ "Return function link to notebook documentation of `ft`. Private functions link to source code." ]
Please provide a description of the function:def get_pytorch_link(ft)->str: "Returns link to pytorch docs of `ft`." name = ft.__name__ ext = '.html' if name == 'device': return f'{PYTORCH_DOCS}tensor_attributes{ext}#torch-device' if name == 'Tensor': return f'{PYTORCH_DOCS}tensors{ext}#torch-tensor' if name.startswith('torchvision'): doc_path = get_module_name(ft).replace('.', '/') if inspect.ismodule(ft): name = name.replace('.', '-') return f'{PYTORCH_DOCS}{doc_path}{ext}#{name}' if name.startswith('torch.nn') and inspect.ismodule(ft): # nn.functional is special case nn_link = name.replace('.', '-') return f'{PYTORCH_DOCS}nn{ext}#{nn_link}' paths = get_module_name(ft).split('.') if len(paths) == 1: return f'{PYTORCH_DOCS}{paths[0]}{ext}#{paths[0]}.{name}' offset = 1 if paths[1] == 'utils' else 0 # utils is a pytorch special case doc_path = paths[1+offset] if inspect.ismodule(ft): return f'{PYTORCH_DOCS}{doc_path}{ext}#module-{name}' fnlink = '.'.join(paths[:(2+offset)]+[name]) return f'{PYTORCH_DOCS}{doc_path}{ext}#{fnlink}'
[ "Returns link to pytorch docs of `ft`." ]
Please provide a description of the function:def get_source_link(file, line, display_text="[source]", **kwargs)->str: "Returns github link for given file" link = f"{SOURCE_URL}{file}#L{line}" if display_text is None: return link return f'<a href="{link}" class="source_link" style="float:right">{display_text}</a>'
[ "Returns github link for given file." ]
Please provide a description of the function:def get_function_source(ft, **kwargs)->str: "Returns link to `ft` in source code." try: line = inspect.getsourcelines(ft)[1] except Exception: return '' mod_path = get_module_name(ft).replace('.', '/') + '.py' return get_source_link(mod_path, line, **kwargs)
[ "Returns link to `ft` in source code." ]
Please provide a description of the function:def find_comment_markers(cellsource): found = {} for line in cellsource.splitlines(): line = line.strip() if line.startswith('#'): # print("Found comment in '{}'".format(line)) comment = line.lstrip('#').strip() if comment in comment_markers: # print("Found marker {}".format(comment)) marker = comment_markers[comment] if not isinstance(marker, tuple): # If not an explicit tuple ('option', True/False), # imply ('option', True) marker = (marker, True) marker_type = marker[0] if marker_type in found: warnings.warn( "Conflicting comment markers found, using the latest: " " %s VS %s" % (found[marker_type], comment)) found[marker_type] = comment yield marker
[ "Look through the cell source for comments which affect nbval's behaviour\n\n Yield an iterable of ``(MARKER_TYPE, True)``.\n " ]
Please provide a description of the function:def coalesce_streams(outputs): if not outputs: return outputs new_outputs = [] streams = {} for output in outputs: if (output.output_type == 'stream'): if output.name in streams: streams[output.name].text += output.text else: new_outputs.append(output) streams[output.name] = output else: new_outputs.append(output) # process \r and \b characters for output in streams.values(): old = output.text while len(output.text) < len(old): old = output.text # Cancel out anything-but-newline followed by backspace output.text = backspace_pat.sub('', output.text) # Replace all carriage returns not followed by newline output.text = carriagereturn_pat.sub('', output.text) return new_outputs
[ "\n Merge all stream outputs with shared names into single streams\n to ensure deterministic outputs.\n\n Parameters\n ----------\n outputs : iterable of NotebookNodes\n Outputs being processed\n " ]
Please provide a description of the function:def transform_streams_for_comparison(outputs): new_outputs = [] for output in outputs: if (output.output_type == 'stream'): # Transform output new_outputs.append({ 'output_type': 'stream', output.name: output.text, }) else: new_outputs.append(output) return new_outputs
[ "Makes failure output for streams better by having key be the stream name" ]
Please provide a description of the function:def _trim_base64(s): if len(s) > 64 and _base64.match(s.replace('\n', '')): h = hash_string(s) s = '%s...<snip base64, md5=%s...>' % (s[:8], h[:16]) return s
[ "Trim and hash base64 strings" ]
Please provide a description of the function:def _indent(s, indent=' '): if isinstance(s, six.string_types): return '\n'.join(('%s%s' % (indent, line) for line in s.splitlines())) return s
[ "Intent each line with indent" ]
Please provide a description of the function:def setup(self): if self.parent.config.option.current_env: kernel_name = CURRENT_ENV_KERNEL_NAME else: kernel_name = self.nb.metadata.get( 'kernelspec', {}).get('name', 'python') self.kernel = RunningKernel(kernel_name, str(self.fspath.dirname)) self.setup_sanitize_files() if getattr(self.parent.config.option, 'cov_source', None): setup_coverage(self.parent.config, self.kernel, getattr(self, "fspath", None))
[ "\n Called by pytest to setup the collector cells in .\n Here we start a kernel and setup the sanitize patterns.\n " ]
Please provide a description of the function:def setup_sanitize_files(self): for fname in self.get_sanitize_files(): with open(fname, 'r') as f: self.sanitize_patterns.update(get_sanitize_patterns(f.read()))
[ "\n For each of the sanitize files that were specified as command line options\n load the contents of the file into the sanitise patterns dictionary.\n " ]
Please provide a description of the function:def get_sanitize_files(self): if self.parent.config.option.sanitize_with is not None: return [self.parent.config.option.sanitize_with] else: return []
[ "\n Return list of all sanitize files provided by the user on the command line.\n\n N.B.: We only support one sanitize file at the moment, but\n this is likely to change in the future\n\n " ]
Please provide a description of the function:def get_kernel_message(self, timeout=None, stream='iopub'): return self.kernel.get_message(stream, timeout=timeout)
[ "\n Gets a message from the iopub channel of the notebook kernel.\n " ]
Please provide a description of the function:def collect(self): self.nb = nbformat.read(str(self.fspath), as_version=4) # Start the cell count cell_num = 0 # Iterate over the cells in the notebook for cell in self.nb.cells: # Skip the cells that have text, headings or related stuff # Only test code cells if cell.cell_type == 'code': # The cell may contain a comment indicating that its output # should be checked or ignored. If it doesn't, use the default # behaviour. The --nbval option checks unmarked cells. with warnings.catch_warnings(record=True) as ws: options = defaultdict(bool, find_metadata_tags(cell.metadata)) comment_opts = dict(find_comment_markers(cell.source)) if set(comment_opts.keys()) & set(options.keys()): warnings.warn( "Overlapping options from comments and metadata, " "using options from comments: %s" % str(set(comment_opts.keys()) & set(options.keys()))) for w in ws: self.parent.config.warn( "C1", str(w.message), '%s:Cell %d' % ( getattr(self, "fspath", None), cell_num)) options.update(comment_opts) options.setdefault('check', self.compare_outputs) yield IPyNbCell('Cell ' + str(cell_num), self, cell_num, cell, options) # Update 'code' cell count cell_num += 1
[ "\n The collect function is required by pytest and is used to yield pytest\n Item objects. We specify an Item for each code cell in the notebook.\n " ]
Please provide a description of the function:def repr_failure(self, excinfo): exc = excinfo.value cc = self.colors if isinstance(exc, NbCellError): msg_items = [ cc.FAIL + "Notebook cell execution failed" + cc.ENDC] formatstring = ( cc.OKBLUE + "Cell %d: %s\n\n" + "Input:\n" + cc.ENDC + "%s\n") msg_items.append(formatstring % ( exc.cell_num, str(exc), exc.source )) if exc.inner_traceback: msg_items.append(( cc.OKBLUE + "Traceback:" + cc.ENDC + "\n%s\n") % exc.inner_traceback) return "\n".join(msg_items) else: return "pytest plugin exception: %s" % str(exc)
[ " called when self.runtest() raises an exception. " ]
Please provide a description of the function:def format_output_compare(self, key, left, right): if isinstance(left, six.string_types): left = _trim_base64(left) if isinstance(right, six.string_types): right = _trim_base64(right) cc = self.colors self.comparison_traceback.append( cc.OKBLUE + " mismatch '%s'" % key + cc.FAIL) # Use comparison repr from pytest: hook_result = self.ihook.pytest_assertrepr_compare( config=self.config, op='==', left=left, right=right) for new_expl in hook_result: if new_expl: new_expl = [' %s' % line.replace("\n", "\\n") for line in new_expl] self.comparison_traceback.append("\n assert reference_output == test_output failed:\n") self.comparison_traceback.extend(new_expl) break else: # Fallback repr: self.comparison_traceback.append( " <<<<<<<<<<<< Reference output from ipynb file:" + cc.ENDC) self.comparison_traceback.append(_indent(left)) self.comparison_traceback.append( cc.FAIL + ' ============ disagrees with newly computed (test) output:' + cc.ENDC) self.comparison_traceback.append(_indent(right)) self.comparison_traceback.append( cc.FAIL + ' >>>>>>>>>>>>') self.comparison_traceback.append(cc.ENDC)
[ "Format an output for printing" ]
Please provide a description of the function:def sanitize(self, s): if not isinstance(s, six.string_types): return s for regex, replace in six.iteritems(self.parent.sanitize_patterns): s = re.sub(regex, replace, s) return s
[ "sanitize a string for comparison.\n ", "\n re.sub matches a regex and replaces it with another.\n The regex replacements are taken from a file if the option\n is passed when py.test is called. Otherwise, the strings\n are not processed\n " ]
Please provide a description of the function:def _tta_only(learn:Learner, ds_type:DatasetType=DatasetType.Valid, scale:float=1.35) -> Iterator[List[Tensor]]: "Computes the outputs for several augmented inputs for TTA" dl = learn.dl(ds_type) ds = dl.dataset old = ds.tfms augm_tfm = [o for o in learn.data.train_ds.tfms if o.tfm not in (crop_pad, flip_lr, dihedral, zoom)] try: pbar = master_bar(range(8)) for i in pbar: row = 1 if i&1 else 0 col = 1 if i&2 else 0 flip = i&4 d = {'row_pct':row, 'col_pct':col, 'is_random':False} tfm = [*augm_tfm, zoom(scale=scale, **d), crop_pad(**d)] if flip: tfm.append(flip_lr(p=1.)) ds.tfms = tfm yield get_preds(learn.model, dl, pbar=pbar, activ=_loss_func2activ(learn.loss_func))[0] finally: ds.tfms = old
[ "Computes the outputs for several augmented inputs for TTA" ]
Please provide a description of the function:def _TTA(learn:Learner, beta:float=0.4, scale:float=1.35, ds_type:DatasetType=DatasetType.Valid, with_loss:bool=False) -> Tensors: "Applies TTA to predict on `ds_type` dataset." preds,y = learn.get_preds(ds_type) all_preds = list(learn.tta_only(scale=scale, ds_type=ds_type)) avg_preds = torch.stack(all_preds).mean(0) if beta is None: return preds,avg_preds,y else: final_preds = preds*beta + avg_preds*(1-beta) if with_loss: with NoneReduceOnCPU(learn.loss_func) as lf: loss = lf(final_preds, y) return final_preds, y, loss return final_preds, y
[ "Applies TTA to predict on `ds_type` dataset." ]
Please provide a description of the function:def fbeta(y_pred:Tensor, y_true:Tensor, thresh:float=0.2, beta:float=2, eps:float=1e-9, sigmoid:bool=True)->Rank0Tensor: "Computes the f_beta between `preds` and `targets`" beta2 = beta ** 2 if sigmoid: y_pred = y_pred.sigmoid() y_pred = (y_pred>thresh).float() y_true = y_true.float() TP = (y_pred*y_true).sum(dim=1) prec = TP/(y_pred.sum(dim=1)+eps) rec = TP/(y_true.sum(dim=1)+eps) res = (prec*rec)/(prec*beta2+rec+eps)*(1+beta2) return res.mean()
[ "Computes the f_beta between `preds` and `targets`" ]
Please provide a description of the function:def accuracy(input:Tensor, targs:Tensor)->Rank0Tensor: "Compute accuracy with `targs` when `input` is bs * n_classes." n = targs.shape[0] input = input.argmax(dim=-1).view(n,-1) targs = targs.view(n,-1) return (input==targs).float().mean()
[ "Compute accuracy with `targs` when `input` is bs * n_classes." ]
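A small worked example of the accuracy metric above, assuming torch and the function are available; the logits are made up:

import torch

logits = torch.tensor([[0.1, 0.7, 0.2],
                       [0.8, 0.1, 0.1]])  # batch of 2, 3 classes
targs = torch.tensor([1, 2])
print(accuracy(logits, targs))            # argmax picks [1, 0] -> 1 of 2 correct -> tensor(0.5000)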
Please provide a description of the function:def accuracy_thresh(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True)->Rank0Tensor: "Compute accuracy when `y_pred` and `y_true` are the same size." if sigmoid: y_pred = y_pred.sigmoid() return ((y_pred>thresh)==y_true.byte()).float().mean()
[ "Compute accuracy when `y_pred` and `y_true` are the same size." ]
Please provide a description of the function:def top_k_accuracy(input:Tensor, targs:Tensor, k:int=5)->Rank0Tensor: "Computes the Top-k accuracy (target is in the top k predictions)." input = input.topk(k=k, dim=-1)[1] targs = targs.unsqueeze(dim=-1).expand_as(input) return (input == targs).max(dim=-1)[0].float().mean()
[ "Computes the Top-k accuracy (target is in the top k predictions)." ]
Please provide a description of the function:def dice(input:Tensor, targs:Tensor, iou:bool=False)->Rank0Tensor: "Dice coefficient metric for binary target. If iou=True, returns iou metric, classic for segmentation problems." n = targs.shape[0] input = input.argmax(dim=1).view(n,-1) targs = targs.view(n,-1) intersect = (input * targs).sum().float() union = (input+targs).sum().float() if not iou: return (2. * intersect / union if union > 0 else union.new([1.]).squeeze()) else: return intersect / (union-intersect+1.0)
[ "Dice coefficient metric for binary target. If iou=True, returns iou metric, classic for segmentation problems." ]
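To make the dice arithmetic concrete, a hedged toy example (assuming torch and the metric above): per-pixel class scores for one 4-pixel image with two classes.

import torch

# Shape (batch=1, classes=2, pixels=4): argmax over dim=1 gives the predicted mask [0, 1, 0, 1].
preds = torch.tensor([[[0.9, 0.1, 0.8, 0.2],
                       [0.1, 0.9, 0.2, 0.8]]])
targs = torch.tensor([[0, 1, 1, 1]])
print(dice(preds, targs))  # intersect=2, union=5 -> 2*2/5 = tensor(0.8000)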
Please provide a description of the function:def exp_rmspe(pred:Tensor, targ:Tensor)->Rank0Tensor: "Exp RMSE between `pred` and `targ`." pred,targ = flatten_check(pred,targ) pred, targ = torch.exp(pred), torch.exp(targ) pct_var = (targ - pred)/targ return torch.sqrt((pct_var**2).mean())
[ "Exp RMSE between `pred` and `targ`." ]
Please provide a description of the function:def mean_absolute_error(pred:Tensor, targ:Tensor)->Rank0Tensor: "Mean absolute error between `pred` and `targ`." pred,targ = flatten_check(pred,targ) return torch.abs(targ - pred).mean()
[ "Mean absolute error between `pred` and `targ`." ]
Please provide a description of the function:def mean_squared_error(pred:Tensor, targ:Tensor)->Rank0Tensor: "Mean squared error between `pred` and `targ`." pred,targ = flatten_check(pred,targ) return F.mse_loss(pred, targ)
[ "Mean squared error between `pred` and `targ`." ]
Please provide a description of the function:def root_mean_squared_error(pred:Tensor, targ:Tensor)->Rank0Tensor: "Root mean squared error between `pred` and `targ`." pred,targ = flatten_check(pred,targ) return torch.sqrt(F.mse_loss(pred, targ))
[ "Root mean squared error between `pred` and `targ`." ]
Please provide a description of the function:def mean_squared_logarithmic_error(pred:Tensor, targ:Tensor)->Rank0Tensor: "Mean squared logarithmic error between `pred` and `targ`." pred,targ = flatten_check(pred,targ) return F.mse_loss(torch.log(1 + pred), torch.log(1 + targ))
[ "Mean squared logarithmic error between `pred` and `targ`." ]
Please provide a description of the function:def explained_variance(pred:Tensor, targ:Tensor)->Rank0Tensor: "Explained variance between `pred` and `targ`." pred,targ = flatten_check(pred,targ) var_pct = torch.var(targ - pred) / torch.var(targ) return 1 - var_pct
[ "Explained variance between `pred` and `targ`." ]
Please provide a description of the function:def r2_score(pred:Tensor, targ:Tensor)->Rank0Tensor: "R2 score (coefficient of determination) between `pred` and `targ`." pred,targ = flatten_check(pred,targ) u = torch.sum((targ - pred) ** 2) d = torch.sum((targ - targ.mean()) ** 2) return 1 - u / d
[ "R2 score (coefficient of determination) between `pred` and `targ`." ]
Please provide a description of the function:def auc_roc_score(input:Tensor, targ:Tensor): "Using trapezoid method to calculate the area under roc curve" fpr, tpr = roc_curve(input, targ) d = fpr[1:] - fpr[:-1] sl1, sl2 = [slice(None)], [slice(None)] sl1[-1], sl2[-1] = slice(1, None), slice(None, -1) return (d * (tpr[tuple(sl1)] + tpr[tuple(sl2)]) / 2.).sum(-1)
[ "Using trapezoid method to calculate the area under roc curve" ]
Please provide a description of the function:def roc_curve(input:Tensor, targ:Tensor): "Returns the false positive and true positive rates" targ = (targ == 1) desc_score_indices = torch.flip(input.argsort(-1), [-1]) input = input[desc_score_indices] targ = targ[desc_score_indices] d = input[1:] - input[:-1] distinct_value_indices = torch.nonzero(d).transpose(0,1)[0] threshold_idxs = torch.cat((distinct_value_indices, LongTensor([len(targ) - 1]).to(targ.device))) tps = torch.cumsum(targ * 1, dim=-1)[threshold_idxs] fps = (1 + threshold_idxs - tps) if tps[0] != 0 or fps[0] != 0: fps = torch.cat((LongTensor([0]), fps)) tps = torch.cat((LongTensor([0]), tps)) fpr, tpr = fps.float() / fps[-1], tps.float() / tps[-1] return fpr, tpr
[ "Returns the false positive and true positive rates" ]
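As a sanity check of the two ROC helpers above, a toy score/target pair (assuming torch is available and LongTensor resolves to torch.LongTensor, as elsewhere in this module):

import torch

scores = torch.tensor([0.9, 0.8, 0.3, 0.1])  # illustrative prediction scores
targs = torch.tensor([1, 0, 1, 0])           # binary ground truth
fpr, tpr = roc_curve(scores, targs)
print(auc_roc_score(scores, targs))          # trapezoidal area under (fpr, tpr) -> tensor(0.7500)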
Please provide a description of the function:def A(*a): return np.array(a[0]) if len(a)==1 else [np.array(o) for o in a]
[ "convert iterable object into numpy array" ]
Please provide a description of the function:def T(a, half=False, cuda=True): if not torch.is_tensor(a): a = np.array(np.ascontiguousarray(a)) if a.dtype in (np.int8, np.int16, np.int32, np.int64): a = torch.LongTensor(a.astype(np.int64)) elif a.dtype in (np.float32, np.float64): a = to_half(a) if half else torch.FloatTensor(a) else: raise NotImplementedError(a.dtype) if cuda: a = to_gpu(a) return a
[ "\n Convert numpy array into a pytorch tensor. \n if Cuda is available and USE_GPU=True, store resulting tensor in GPU.\n " ]
Please provide a description of the function:def V_(x, requires_grad=False, volatile=False): '''equivalent to create_variable, which creates a pytorch tensor''' return create_variable(x, volatile=volatile, requires_grad=requires_grad)
[ "Equivalent to create_variable, which creates a pytorch tensor" ]
Please provide a description of the function:def V(x, requires_grad=False, volatile=False): '''creates a single or a list of pytorch tensors, depending on input x. ''' return map_over(x, lambda o: V_(o, requires_grad, volatile))
[ "Creates a single or a list of pytorch tensors, depending on input x." ]
Please provide a description of the function:def to_np(v): '''returns an np.array object given an input of np.array, list, tuple, torch variable or tensor.''' if isinstance(v, float): return np.array(v) if isinstance(v, (np.ndarray, np.generic)): return v if isinstance(v, (list,tuple)): return [to_np(o) for o in v] if isinstance(v, Variable): v=v.data if torch.cuda.is_available(): if is_half_tensor(v): v=v.float() if isinstance(v, torch.FloatTensor): v=v.float() return v.cpu().numpy()
[ "Returns an np.array object given an input of np.array, list, tuple, torch variable or tensor." ]
Please provide a description of the function:def to_gpu(x, *args, **kwargs): '''puts pytorch variable to gpu, if cuda is available and USE_GPU is set to true. ''' return x.cuda(*args, **kwargs) if USE_GPU else x
[ "Puts pytorch variable to gpu, if cuda is available and USE_GPU is set to true." ]
Please provide a description of the function:def split_by_idxs(seq, idxs): '''A generator that returns sequence pieces, seperated by indexes specified in idxs. ''' last = 0 for idx in idxs: if not (-len(seq) <= idx < len(seq)): raise KeyError(f'Idx {idx} is out-of-bounds') yield seq[last:idx] last = idx yield seq[last:]
[ "A generator that returns sequence pieces, separated by indexes specified in idxs." ]
Please provide a description of the function:def partition(a, sz): return [a[i:i+sz] for i in range(0, len(a), sz)]
[ "splits iterables a in equal parts of size sz" ]
Please provide a description of the function:def chunk_iter(iterable, chunk_size): '''A generator that yields chunks of iterable, chunk_size at a time. ''' while True: chunk = [] try: for _ in range(chunk_size): chunk.append(next(iterable)) yield chunk except StopIteration: if chunk: yield chunk break
[ "A generator that yields chunks of iterable, chunk_size at a time." ]
Please provide a description of the function:def _brightness(x, change:uniform): "Apply `change` in brightness of image `x`." return x.add_(scipy.special.logit(change))
[ "Apply `change` in brightness of image `x`." ]
Please provide a description of the function:def _rotate(degrees:uniform): "Rotate image by `degrees`." angle = degrees * math.pi / 180 return [[cos(angle), -sin(angle), 0.], [sin(angle), cos(angle), 0.], [0. , 0. , 1.]]
[ "Rotate image by `degrees`." ]
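A quick numeric check of the affine matrix returned by _rotate, assuming math's cos/sin are imported at module level as the helper expects:

m = _rotate(90.0)
# For 90 degrees the matrix is approximately [[0, -1, 0], [1, 0, 0], [0, 0, 1]],
# i.e. [[cos a, -sin a, 0], [sin a, cos a, 0], [0, 0, 1]] with a = pi/2.
print(m)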
Please provide a description of the function:def _get_zoom_mat(sw:float, sh:float, c:float, r:float)->AffineMatrix: "`sw`,`sh` scale width,height - `c`,`r` focus col,row." return [[sw, 0, c], [0, sh, r], [0, 0, 1.]]
[ "`sw`,`sh` scale width,height - `c`,`r` focus col,row." ]
Please provide a description of the function:def _zoom(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5): "Zoom image by `scale`. `row_pct`,`col_pct` select focal point of zoom." s = 1-1/scale col_c = s * (2*col_pct - 1) row_c = s * (2*row_pct - 1) return _get_zoom_mat(1/scale, 1/scale, col_c, row_c)
[ "Zoom image by `scale`. `row_pct`,`col_pct` select focal point of zoom." ]
Please provide a description of the function:def _squish(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5): "Squish image by `scale`. `row_pct`,`col_pct` select focal point of zoom." if scale <= 1: col_c = (1-scale) * (2*col_pct - 1) return _get_zoom_mat(scale, 1, col_c, 0.) else: row_c = (1-1/scale) * (2*row_pct - 1) return _get_zoom_mat(1, 1/scale, 0., row_c)
[ "Squish image by `scale`. `row_pct`,`col_pct` select focal point of zoom." ]
Please provide a description of the function:def _jitter(c, magnitude:uniform): "Replace pixels by random neighbors at `magnitude`." c.flow.add_((torch.rand_like(c.flow)-0.5)*magnitude*2) return c
[ "Replace pixels by random neighbors at `magnitude`." ]
Please provide a description of the function:def _flip_lr(x): "Flip `x` horizontally." #return x.flip(2) if isinstance(x, ImagePoints): x.flow.flow[...,0] *= -1 return x return tensor(np.ascontiguousarray(np.array(x)[...,::-1]))
[ "Flip `x` horizontally." ]