Please provide a description of the function:def on_batch_end(self, train, **kwargs):
"Take the stored results and puts it in `self.stats`"
if train: self.stats.append(self.hooks.stored) | [] |
Please provide a description of the function:def plots_from_files(imspaths, figsize=(10,5), rows=1, titles=None, maintitle=None):
f = plt.figure(figsize=figsize)
if maintitle is not None: plt.suptitle(maintitle, fontsize=16)
for i in range(len(imspaths)):
sp = f.add_subplot(rows, ceildiv(len(imspaths), rows), i+1)
sp.axis('Off')
if titles is not None: sp.set_title(titles[i], fontsize=16)
img = plt.imread(imspaths[i])
plt.imshow(img) | [
"Plots images given image files.\n\n Arguments:\n im_paths (list): list of paths\n figsize (tuple): figure size\n rows (int): number of rows\n titles (list): list of titles\n maintitle (string): main title\n "
] |
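A minimal usage sketch for the plotting helper above; the file names and titles are hypothetical:
# hedged usage sketch -- assumes cat1.jpg/cat2.jpg exist in the working directory
plots_from_files(['cat1.jpg', 'cat2.jpg'], figsize=(12,4), rows=1, titles=['first cat', 'second cat'], maintitle='Cats')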
Please provide a description of the function:def plot_val_with_title(self, idxs, y):
# if there are any samples to be displayed
if len(idxs) > 0:
imgs = np.stack([self.ds[x][0] for x in idxs])
title_probs = [self.probs[x,y] for x in idxs]
return plots(self.ds.denorm(imgs), rows=1, titles=title_probs)
# if idxs is empty, return False
else:
return False | [
" Displays the images and their probabilities of belonging to a certain class\n\n Arguments:\n idxs (numpy.ndarray): indexes of the image samples from the dataset\n y (int): the selected class\n\n Returns:\n Plots the images in n rows [rows = n]\n "
] |
Please provide a description of the function:def most_by_mask(self, mask, y, mult):
idxs = np.where(mask)[0]
cnt = min(4, len(idxs))
return idxs[np.argsort(mult * self.probs[idxs,y])[:cnt]] | [
" Extracts the first 4 most correct/incorrect indexes from the ordered list of probabilities\n\n Arguments:\n mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False everywhere else\n y (int): the selected class\n mult (int): sets the ordering; -1 descending, 1 ascending\n\n Returns:\n idxs (ndarray): An array of indexes of length 4\n "
] |
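The mult argument drives the sort direction; a small self-contained sketch of the trick (the probabilities below are made up):
import numpy as np
probs = np.array([0.9, 0.1, 0.7, 0.4])
idxs = np.arange(len(probs))
print(idxs[np.argsort(-1 * probs)[:4]])   # [0 2 3 1] -- descending, most correct first
print(idxs[np.argsort( 1 * probs)[:4]])   # [1 3 2 0] -- ascending, most incorrect first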
Please provide a description of the function:def most_uncertain_by_mask(self, mask, y):
idxs = np.where(mask)[0]
# the most uncertain samples will have abs(probs-1/num_classes) close to 0;
return idxs[np.argsort(np.abs(self.probs[idxs,y]-(1/self.num_classes)))[:4]] | [
" Extracts the first 4 most uncertain indexes from the ordered list of probabilities\n\n Arguments:\n mask (numpy.ndarray): the mask of probabilities specific to the selected class; a boolean array with shape (num_of_samples,) which contains True where class==selected_class, and False everywhere else\n y (int): the selected class\n\n Returns:\n idxs (ndarray): An array of indexes of length 4\n "
] |
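A worked sketch of the uncertainty criterion with two classes, so 1/num_classes = 0.5 (probabilities made up):
import numpy as np
probs = np.array([0.95, 0.52, 0.48, 0.10])
print(np.argsort(np.abs(probs - 0.5))[:4])   # [1 2 3 0] -- 0.52 and 0.48 sit closest to 0.5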
Please provide a description of the function:def most_by_correct(self, y, is_correct):
# mult=-1 when is_correct is True: to show the most correct samples we sort descending (argsort of the negated probabilities), so the largest probabilities come first.
# When is_correct is False we want the most incorrect samples, so we sort ascending, since our interest is in the smallest probabilities.
mult = -1 if is_correct else 1
return self.most_by_mask(((self.preds == self.ds.y)==is_correct)
& (self.ds.y == y), y, mult) | [
" Extracts the predicted classes which correspond to the selected class (y) and to the specific case (prediction is correct - is_true=True, prediction is wrong - is_true=False)\n\n Arguments:\n y (int): the selected class\n is_correct (boolean): a boolean flag (True, False) which specify the what to look for. Ex: True - most correct samples, False - most incorrect samples\n\n Returns:\n idxs (numpy.ndarray): An array of indexes (numpy.ndarray)\n "
] |
Please provide a description of the function:def plot_by_correct(self, y, is_correct):
return self.plot_val_with_title(self.most_by_correct(y, is_correct), y) | [
" Plots the images which correspond to the selected class (y) and to the specific case (prediction is correct - is_true=True, prediction is wrong - is_true=False)\n\n Arguments:\n y (int): the selected class\n is_correct (boolean): a boolean flag (True, False) which specify the what to look for. Ex: True - most correct samples, False - most incorrect samples\n "
] |
Please provide a description of the function:def most_by_uncertain(self, y):
return self.most_uncertain_by_mask((self.ds.y == y), y) | [
" Extracts the predicted classes which correspond to the selected class (y) and have probabilities nearest to 1/number_of_classes (eg. 0.5 for 2 classes, 0.33 for 3 classes) for the selected class.\n\n Arguments:\n y (int): the selected class\n\n Returns:\n idxs (numpy.ndarray): An array of indexes (numpy.ndarray)\n "
] |
Please provide a description of the function:def main(
gpus:Param("The GPUs to use for distributed training", str)='all',
script:Param("Script to run", str, opt=False)='',
args:Param("Args to pass to script", nargs='...', opt=False)=''
):
"PyTorch distributed training launch helper that spawns multiple distributed processes"
# Loosely based on torch.distributed.launch
current_env = os.environ.copy()
gpus = list(range(torch.cuda.device_count())) if gpus=='all' else list(gpus)
current_env["WORLD_SIZE"] = str(len(gpus))
current_env["MASTER_ADDR"] = '127.0.0.1'
current_env["MASTER_PORT"] = '29500'
processes = []
for i,gpu in enumerate(gpus):
current_env["RANK"] = str(i)
cmd = [sys.executable, "-u", script, f"--gpu={gpu}"] + args
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes: process.wait() | [] |
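A hypothetical invocation of this launcher; note that the gpus string is consumed character by character, so '013' selects devices 0, 1 and 3, and the launched script must accept a --gpu argument (the script name and its flags below are made up):
# python launch.py --gpus=013 train_cifar.py --epochs 5
# -> spawns train_cifar.py --gpu=0 / --gpu=1 / --gpu=3 with RANK=0/1/2 and WORLD_SIZE=3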
Please provide a description of the function:def on_train_begin(self, **kwargs):
"Add the metrics names to the `Recorder`."
self.names = ifnone(self.learn.loss_func.metric_names, [])
if not self.names: warn('LossMetrics requested but no loss_func.metric_names provided')
self.learn.recorder.add_metric_names(self.names) | [] |
Please provide a description of the function:def on_epoch_begin(self, **kwargs):
"Initialize the metrics for this epoch."
self.metrics = {name:0. for name in self.names}
self.nums = 0 | [] |
Please provide a description of the function:def on_batch_end(self, last_target, train, **kwargs):
"Update the metrics if not `train`"
if train: return
bs = last_target.size(0)
for name in self.names:
self.metrics[name] += bs * self.learn.loss_func.metrics[name].detach().cpu()
self.nums += bs | [] |
Please provide a description of the function:def on_epoch_end(self, last_metrics, **kwargs):
"Finish the computation and sends the result to the Recorder."
if not self.nums: return
metrics = [self.metrics[name]/self.nums for name in self.names]
return {'last_metrics': last_metrics+metrics} | [] |
Please provide a description of the function:def on_train_begin(self, **kwargs):
"Create the various optimizers."
self.G_A,self.G_B = self.learn.model.G_A,self.learn.model.G_B
self.D_A,self.D_B = self.learn.model.D_A,self.learn.model.D_B
self.crit = self.learn.loss_func.crit
self.opt_G = self.learn.opt.new([nn.Sequential(*flatten_model(self.G_A), *flatten_model(self.G_B))])
self.opt_D_A = self.learn.opt.new([nn.Sequential(*flatten_model(self.D_A))])
self.opt_D_B = self.learn.opt.new([nn.Sequential(*flatten_model(self.D_B))])
self.learn.opt.opt = self.opt_G.opt
self._set_trainable()
self.names = ['idt_loss', 'gen_loss', 'cyc_loss', 'da_loss', 'db_loss']
self.learn.recorder.no_val=True
self.learn.recorder.add_metric_names(self.names)
self.smootheners = {n:SmoothenValue(0.98) for n in self.names} | [] |
Please provide a description of the function:def on_batch_end(self, last_input, last_output, **kwargs):
"Steps through the generators then each of the critics."
self.G_A.zero_grad(); self.G_B.zero_grad()
fake_A, fake_B = last_output[0].detach(), last_output[1].detach()
real_A, real_B = last_input
self._set_trainable(D_A=True)
self.D_A.zero_grad()
loss_D_A = 0.5 * (self.crit(self.D_A(real_A), True) + self.crit(self.D_A(fake_A), False))
loss_D_A.backward()
self.opt_D_A.step()
self._set_trainable(D_B=True)
self.D_B.zero_grad()
loss_D_B = 0.5 * (self.crit(self.D_B(real_B), True) + self.crit(self.D_B(fake_B), False))
loss_D_B.backward()
self.opt_D_B.step()
self._set_trainable()
metrics = self.learn.loss_func.metrics + [loss_D_A, loss_D_B]
for n,m in zip(self.names,metrics): self.smootheners[n].add_value(m) | [] |
Please provide a description of the function:def on_epoch_end(self, last_metrics, **kwargs):
"Put the various losses in the recorder."
return add_metrics(last_metrics, [s.smooth for k,s in self.smootheners.items()]) | [] |
Please provide a description of the function:def on_train_begin(self, **kwargs: Any) -> None:
"Prepare file with metric names."
self.path.parent.mkdir(parents=True, exist_ok=True)
self.file = self.path.open('a') if self.append else self.path.open('w')
self.file.write(','.join(self.learn.recorder.names[:(None if self.add_time else -1)]) + '\n') | [] |
Please provide a description of the function:def on_epoch_end(self, epoch: int, smooth_loss: Tensor, last_metrics: MetricsList, **kwargs: Any) -> bool:
"Add a line with `epoch` number, `smooth_loss` and `last_metrics`."
last_metrics = ifnone(last_metrics, [])
stats = [str(stat) if isinstance(stat, int) else '#na#' if stat is None else f'{stat:.6f}'
for name, stat in zip(self.learn.recorder.names, [epoch, smooth_loss] + last_metrics)]
if self.add_time: stats.append(format_time(time() - self.start_epoch))
str_stats = ','.join(stats)
self.file.write(str_stats + '\n') | [] |
Please provide a description of the function:def get_master(layer_groups:ModuleList, flat_master:bool=False) -> Tuple[List[List[Tensor]], List[List[Tensor]]]:
"Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
split_params = split_no_wd_params(layer_groups)
model_params = [[param for param in pg if param.requires_grad] for pg in split_params]
if flat_master:
master_params = []
for lg in model_params:
if len(lg) !=0 :
mp = parameters_to_vector([param.data.float() for param in lg])
mp = torch.nn.Parameter(mp, requires_grad=True)
if mp.grad is None: mp.grad = mp.new(*mp.size())
master_params.append([mp])
else: master_params.append([])
return model_params, master_params
else:
master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
for mp in master_params:
for param in mp: param.requires_grad = True
return model_params, master_params | [] |
Please provide a description of the function:def model_g2master_g(model_params:Sequence[Tensor], master_params:Sequence[Tensor], flat_master:bool=False)->None:
"Copy the `model_params` gradients to `master_params` for the optimizer step."
if flat_master:
for model_group,master_group in zip(model_params,master_params):
if len(master_group) != 0:
if master_group[0].grad is None: master_group[0].grad = master_group[0].data.new(*master_group[0].data.size())
master_group[0].grad.data.copy_(parameters_to_vector([p.grad.data.float() for p in model_group]))
else:
for model_group,master_group in zip(model_params,master_params):
for model, master in zip(model_group, master_group):
if model.grad is not None:
if master.grad is None: master.grad = master.data.new(*master.data.size())
master.grad.data.copy_(model.grad.data)
else: master.grad = None | [] |
Please provide a description of the function:def master2model(model_params:Sequence[Tensor], master_params:Sequence[Tensor], flat_master:bool=False)->None:
"Copy `master_params` to `model_params`."
if flat_master:
for model_group,master_group in zip(model_params,master_params):
if len(model_group) != 0:
for model, master in zip(model_group, _unflatten_dense_tensors(master_group[0].data, model_group)):
model.data.copy_(master)
else:
for model_group,master_group in zip(model_params,master_params):
for model, master in zip(model_group, master_group): model.data.copy_(master.data) | [] |
Please provide a description of the function:def on_train_begin(self, **kwargs:Any)->None:
"Prepare the master model."
#Get a copy of the model params in FP32
self.model_params, self.master_params = get_master(self.learn.layer_groups, self.flat_master)
#Changes the optimizer so that the optimization step is done in FP32.
new_opt = self.learn.opt.new_with_params(self.master_params)
if self.opt is not None:
self.opt.lr,self.opt.wd = self.learn.opt.lr,self.learn.opt.wd
new_opt.load_state_dict(self.opt)
self.learn.opt.opt = new_opt.opt
self.noskip = 0 | [] |
Please provide a description of the function:def on_backward_begin(self, last_loss:Rank0Tensor, **kwargs:Any) -> Rank0Tensor:
"Scale gradients up by `self.loss_scale` to prevent underflow."
#To avoid gradient underflow, we scale the gradients
ret_loss = last_loss * self.loss_scale
return {'last_loss': ret_loss} | [] |
Please provide a description of the function:def on_backward_end(self, **kwargs:Any)->None:
"Convert the gradients back to FP32 and divide them by the scale."
if self.dynamic and grad_overflow(self.model_params) and self.loss_scale > 1:
self.loss_scale /= 2
self.noskip = 0
#The step will be skipped since we don't update the master grads so they are all None or zero
else:
model_g2master_g(self.model_params, self.master_params, self.flat_master)
for group in self.master_params:
for param in group:
if param.grad is not None: param.grad.div_(self.loss_scale)
if self.clip is not None:
for group in self.master_params: nn.utils.clip_grad_norm_(group, self.clip)
if not self.dynamic: return
self.noskip += 1
if self.noskip >= self.max_noskip and self.loss_scale < self.max_scale:
self.loss_scale *= 2
self.noskip = 0 | [] |
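The dynamic loss-scaling policy above fits in a few lines; this is a self-contained sketch of the same idea, not fastai's API (the constants are illustrative, not the library's defaults):
loss_scale, noskip, max_noskip, max_scale = 2.**16, 0, 500, 2.**24
def after_backward(overflow: bool) -> bool:
    "Return True if the optimizer step should run."
    global loss_scale, noskip
    if overflow and loss_scale > 1:   # gradients overflowed: halve the scale, skip this step
        loss_scale /= 2; noskip = 0
        return False
    noskip += 1
    if noskip >= max_noskip and loss_scale < max_scale:   # long stable run: grow the scale back
        loss_scale *= 2; noskip = 0
    return True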
Please provide a description of the function:def on_step_end(self, **kwargs:Any)->None:
"Update the params from master to model and zero grad."
#Zeros the gradients of the model since the optimizer is disconnected.
self.learn.model.zero_grad()
#Update the params from master to model.
master2model(self.model_params, self.master_params, self.flat_master) | [] |
Please provide a description of the function:def scale_min(im, targ, interpolation=cv2.INTER_AREA):
r,c,*_ = im.shape
ratio = targ/min(r,c)
sz = (scale_to(c, ratio, targ), scale_to(r, ratio, targ))
return cv2.resize(im, sz, interpolation=interpolation) | [
" Scale the image so that the smallest axis is of size targ.\n\n Arguments:\n im (array): image\n targ (int): target size\n "
] |
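A worked example of the sizing arithmetic: for a 200x300 (h x w) image with targ=224, ratio = 224/min(200,300) = 1.12, so sz = (floor(300*1.12), 224) = (336, 224) in cv2's (width, height) order, and the short side lands exactly on targ.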
Please provide a description of the function:def zoom_cv(x,z):
if z==0: return x
r,c,*_ = x.shape
M = cv2.getRotationMatrix2D((c/2,r/2),0,z+1.)
return cv2.warpAffine(x,M,(c,r)) | [
" Zoom the center of image x by a factor of z+1 while retaining the original image size and proportion. "
] |
Please provide a description of the function:def stretch_cv(x,sr,sc,interpolation=cv2.INTER_AREA):
if sr==0 and sc==0: return x
r,c,*_ = x.shape
x = cv2.resize(x, None, fx=sr+1, fy=sc+1, interpolation=interpolation)
nr,nc,*_ = x.shape
cr = (nr-r)//2; cc = (nc-c)//2
return x[cr:r+cr, cc:c+cc] | [
" Stretches image x horizontally by sr+1, and vertically by sc+1 while retaining the original image size and proportion. "
] |
Please provide a description of the function:def dihedral(x, dih):
x = np.rot90(x, dih%4)
return x if dih<4 else np.fliplr(x) | [
" Perform any of 8 permutations of 90-degrees rotations or flips for image x. "
] |
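A quick enumeration sketch of all eight transforms (the array is a toy input):
import numpy as np
x = np.arange(6).reshape(2, 3)
# dih 0..3 are the four 90-degree rotations; dih 4..7 apply a left-right flip on top
for dih in range(8):
    print(dih, dihedral(x, dih).shape)   # shape alternates between (2,3) and (3,2)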
Please provide a description of the function:def lighting(im, b, c):
if b==0 and c==1: return im
mu = np.average(im)
return np.clip((im-mu)*c+mu+b,0.,1.).astype(np.float32) | [
" Adjust image balance and contrast "
] |
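A worked sketch: c scales deviations around the mean (contrast) and b shifts it (brightness), with the result clipped to [0,1]; the pixel values are made up:
import numpy as np
im = np.array([[0.2, 0.8]], dtype=np.float32)   # mean is 0.5
print(lighting(im, b=0.0, c=2.0))   # [[0. 1.]] -- deviations doubled, then clipped
print(lighting(im, b=0.1, c=1.0))   # [[0.3 0.9]] -- a pure brightness shift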
Please provide a description of the function:def no_crop(im, min_sz=None, interpolation=cv2.INTER_AREA):
r,c,*_ = im.shape
if min_sz is None: min_sz = min(r,c)
return cv2.resize(im, (min_sz, min_sz), interpolation=interpolation) | [
" Return a squared resized image "
] |
Please provide a description of the function:def center_crop(im, min_sz=None):
r,c,*_ = im.shape
if min_sz is None: min_sz = min(r,c)
start_r = math.ceil((r-min_sz)/2)
start_c = math.ceil((c-min_sz)/2)
return crop(im, start_r, start_c, min_sz) | [
" Return a center crop of an image "
] |
Please provide a description of the function:def googlenet_resize(im, targ, min_area_frac, min_aspect_ratio, max_aspect_ratio, flip_hw_p, interpolation=cv2.INTER_AREA):
h,w,*_ = im.shape
area = h*w
for _ in range(10):
targetArea = random.uniform(min_area_frac, 1.0) * area
aspectR = random.uniform(min_aspect_ratio, max_aspect_ratio)
ww = int(np.sqrt(targetArea * aspectR) + 0.5)
hh = int(np.sqrt(targetArea / aspectR) + 0.5)
if flip_hw_p:
ww, hh = hh, ww
if hh <= h and ww <= w:
x1 = 0 if w == ww else random.randint(0, w - ww)
y1 = 0 if h == hh else random.randint(0, h - hh)
out = im[y1:y1 + hh, x1:x1 + ww]
out = cv2.resize(out, (targ, targ), interpolation=interpolation)
return out
out = scale_min(im, targ, interpolation=interpolation)
out = center_crop(out)
return out | [
" Randomly crop an image with an aspect ratio and returns a squared resized image of size targ\n \n References:\n 1. https://arxiv.org/pdf/1409.4842.pdf\n 2. https://arxiv.org/pdf/1802.07888.pdf\n "
] |
Please provide a description of the function:def cutout(im, n_holes, length):
r,c,*_ = im.shape
mask = np.ones((r, c), np.int32)
for n in range(n_holes):
y = np.random.randint(0, r)
x = np.random.randint(0, c)
y1 = int(np.clip(y - length / 2, 0, r))
y2 = int(np.clip(y + length / 2, 0, r))
x1 = int(np.clip(x - length / 2, 0, c))
x2 = int(np.clip(x + length / 2, 0, c))
mask[y1: y2, x1: x2] = 0.
mask = mask[:,:,None]
im = im * mask
return im | [
" Cut out n_holes number of square holes of size length in image at random locations. Holes may overlap. "
] |
Please provide a description of the function:def scale_to(x, ratio, targ):
'''Calculate dimension of an image during scaling with aspect ratio'''
return max(math.floor(x*ratio), targ) | [] |
Please provide a description of the function:def crop(im, r, c, sz):
'''
crop image into a square of size sz,
'''
return im[r:r+sz, c:c+sz] | [] |
Please provide a description of the function:def to_bb(YY, y="deprecated"):
cols,rows = np.nonzero(YY)
if len(cols)==0: return np.zeros(4, dtype=np.float32)
top_row = np.min(rows)
left_col = np.min(cols)
bottom_row = np.max(rows)
right_col = np.max(cols)
return np.array([left_col, top_row, right_col, bottom_row], dtype=np.float32) | [
"Convert mask YY to a bounding box, assumes 0 as background nonzero object"
] |
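A worked example; note that despite the swapped-looking cols,rows names, the returned order works out to (row_min, col_min, row_max, col_max), which is what coords2px below expects:
import numpy as np
YY = np.zeros((5, 6)); YY[1:3, 2:5] = 1   # object covers rows 1-2, cols 2-4
print(to_bb(YY))                          # [1. 2. 2. 4.]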
Please provide a description of the function:def coords2px(y, x):
rows = np.rint([y[0], y[0], y[2], y[2]]).astype(int)
cols = np.rint([y[1], y[3], y[1], y[3]]).astype(int)
r,c,*_ = x.shape
Y = np.zeros((r, c))
Y[rows, cols] = 1
return Y | [
" Transforming coordinates to pixels.\n\n Arguments:\n y : np array\n vector in which (y[0], y[1]) and (y[2], y[3]) are the\n the corners of a bounding box.\n x : image\n an image\n Returns:\n Y : image\n of shape x.shape\n "
] |
Please provide a description of the function:def compose(im, y, fns):
for fn in fns:
#pdb.set_trace()
im, y =fn(im, y)
return im if y is None else (im, y) | [
" Apply a collection of transformation functions :fns: to images "
] |
Please provide a description of the function:def image_gen(normalizer, denorm, sz, tfms=None, max_zoom=None, pad=0, crop_type=None,
tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT, scale=None):
if tfm_y is None: tfm_y=TfmType.NO
if tfms is None: tfms=[]
elif not isinstance(tfms, collections.abc.Iterable): tfms=[tfms]
if sz_y is None: sz_y = sz
if scale is None:
scale = [RandomScale(sz, max_zoom, tfm_y=tfm_y, sz_y=sz_y) if max_zoom is not None
else Scale(sz, tfm_y, sz_y=sz_y)]
elif not is_listy(scale): scale = [scale]
if pad: scale.append(AddPadding(pad, mode=pad_mode))
if crop_type!=CropType.GOOGLENET: tfms=scale+tfms
return Transforms(sz, tfms, normalizer, denorm, crop_type,
tfm_y=tfm_y, sz_y=sz_y) | [
"\n Generate a standard set of transformations\n\n Arguments\n ---------\n normalizer :\n image normalizing function\n denorm :\n image denormalizing function\n sz :\n size, sz_y = sz if not specified.\n tfms :\n iterable collection of transformation functions\n max_zoom : float,\n maximum zoom\n pad : int,\n padding on top, left, right and bottom\n crop_type :\n crop type\n tfm_y :\n y axis specific transformations\n sz_y :\n y size, height\n pad_mode :\n cv2 padding style: repeat, reflect, etc.\n\n Returns\n -------\n type : ``Transforms``\n transformer for specified image operations.\n\n See Also\n --------\n Transforms: the transformer object returned by this function\n "
] |
Please provide a description of the function:def tfms_from_stats(stats, sz, aug_tfms=None, max_zoom=None, pad=0, crop_type=CropType.RANDOM,
tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT, norm_y=True, scale=None):
if aug_tfms is None: aug_tfms=[]
tfm_norm = Normalize(*stats, tfm_y=tfm_y if norm_y else TfmType.NO) if stats is not None else None
tfm_denorm = Denormalize(*stats) if stats is not None else None
val_crop = CropType.CENTER if crop_type in (CropType.RANDOM,CropType.GOOGLENET) else crop_type
val_tfm = image_gen(tfm_norm, tfm_denorm, sz, pad=pad, crop_type=val_crop,
tfm_y=tfm_y, sz_y=sz_y, scale=scale)
trn_tfm = image_gen(tfm_norm, tfm_denorm, sz, pad=pad, crop_type=crop_type,
tfm_y=tfm_y, sz_y=sz_y, tfms=aug_tfms, max_zoom=max_zoom, pad_mode=pad_mode, scale=scale)
return trn_tfm, val_tfm | [
" Given the statistics of the training image sets, returns separate training and validation transform functions\n "
] |
Please provide a description of the function:def tfms_from_model(f_model, sz, aug_tfms=None, max_zoom=None, pad=0, crop_type=CropType.RANDOM,
tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT, norm_y=True, scale=None):
stats = inception_stats if f_model in inception_models else imagenet_stats
return tfms_from_stats(stats, sz, aug_tfms, max_zoom=max_zoom, pad=pad, crop_type=crop_type,
tfm_y=tfm_y, sz_y=sz_y, pad_mode=pad_mode, norm_y=norm_y, scale=scale) | [
" Returns separate transformers of images for training and validation.\n Transformers are constructed according to the image statistics given by the model. (See tfms_from_stats)\n\n Arguments:\n f_model: model, pretrained or not pretrained\n "
] |
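A hedged usage sketch in the style of the old fastai API (assumes resnet34 and transforms_side_on are importable from that library):
tfms = tfms_from_model(resnet34, sz=224, aug_tfms=transforms_side_on, max_zoom=1.1)
trn_tfm, val_tfm = tfms   # training transforms include augmentation, validation ones don't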
Please provide a description of the function:def get_image_files(c:PathOrStr, check_ext:bool=True, recurse=False)->FilePathList:
"Return list of files in `c` that are images. `check_ext` will filter to `image_extensions`."
return get_files(c, extensions=(image_extensions if check_ext else None), recurse=recurse) | [] |
Please provide a description of the function:def get_annotations(fname, prefix=None):
"Open a COCO style json in `fname` and returns the lists of filenames (with maybe `prefix`) and labelled bboxes."
annot_dict = json.load(open(fname))
id2images, id2bboxes, id2cats = {}, collections.defaultdict(list), collections.defaultdict(list)
classes = {}
for o in annot_dict['categories']:
classes[o['id']] = o['name']
for o in annot_dict['annotations']:
bb = o['bbox']
id2bboxes[o['image_id']].append([bb[1],bb[0], bb[3]+bb[1], bb[2]+bb[0]])
id2cats[o['image_id']].append(classes[o['category_id']])
for o in annot_dict['images']:
if o['id'] in id2bboxes:
id2images[o['id']] = ifnone(prefix, '') + o['file_name']
ids = list(id2images.keys())
return [id2images[k] for k in ids], [[id2bboxes[k], id2cats[k]] for k in ids] | [] |
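A worked sketch of the coordinate shuffle in the loop above: COCO stores bboxes as [x, y, w, h], and the code re-orders them to [top, left, bottom, right]:
bb = [10, 20, 30, 40]                              # x=10, y=20, w=30, h=40
print([bb[1], bb[0], bb[3]+bb[1], bb[2]+bb[0]])    # [20, 10, 60, 40] = [y, x, y+h, x+w]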
Please provide a description of the function:def bb_pad_collate(samples:BatchSamples, pad_idx:int=0) -> Tuple[FloatTensor, Tuple[LongTensor, LongTensor]]:
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
if isinstance(samples[0][1], int): return data_collate(samples)
max_len = max([len(s[1].data[1]) for s in samples])
bboxes = torch.zeros(len(samples), max_len, 4)
labels = torch.zeros(len(samples), max_len).long() + pad_idx
imgs = []
for i,s in enumerate(samples):
imgs.append(s[0].data[None])
bbs, lbls = s[1].data
if not (bbs.nelement() == 0):
bboxes[i,-len(lbls):] = bbs
labels[i,-len(lbls):] = tensor(lbls)
return torch.cat(imgs,0), (bboxes,labels) | [] |
Please provide a description of the function:def normalize(x:TensorImage, mean:FloatTensor,std:FloatTensor)->TensorImage:
"Normalize `x` with `mean` and `std`."
return (x-mean[...,None,None]) / std[...,None,None] | [] |
Please provide a description of the function:def denormalize(x:TensorImage, mean:FloatTensor,std:FloatTensor, do_x:bool=True)->TensorImage:
"Denormalize `x` with `mean` and `std`."
return x.cpu().float()*std[...,None,None] + mean[...,None,None] if do_x else x.cpu() | [] |
Please provide a description of the function:def _normalize_batch(b:Tuple[Tensor,Tensor], mean:FloatTensor, std:FloatTensor, do_x:bool=True, do_y:bool=False)->Tuple[Tensor,Tensor]:
"`b` = `x`,`y` - normalize `x` array of imgs and `do_y` optionally `y`."
x,y = b
mean,std = mean.to(x.device),std.to(x.device)
if do_x: x = normalize(x,mean,std)
if do_y and len(y.shape) == 4: y = normalize(y,mean,std)
return x,y | [] |
Please provide a description of the function:def normalize_funcs(mean:FloatTensor, std:FloatTensor, do_x:bool=True, do_y:bool=False)->Tuple[Callable,Callable]:
"Create normalize/denormalize func using `mean` and `std`, can specify `do_y` and `device`."
mean,std = tensor(mean),tensor(std)
return (partial(_normalize_batch, mean=mean, std=std, do_x=do_x, do_y=do_y),
partial(denormalize, mean=mean, std=std, do_x=do_x)) | [] |
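A hedged usage sketch with the widely published ImageNet statistics; the round trip should recover the input up to float error:
import torch
norm, denorm = normalize_funcs([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
x, y = torch.rand(2, 3, 8, 8), torch.zeros(2).long()
xn, _ = norm((x, y))                       # normalize an (x, y) batch
assert torch.allclose(denorm(xn), x, atol=1e-5)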
Please provide a description of the function:def channel_view(x:Tensor)->Tensor:
"Make channel the first axis of `x` and flatten remaining axes"
return x.transpose(0,1).contiguous().view(x.shape[1],-1) | [] |
Please provide a description of the function:def download_images(urls:Collection[str], dest:PathOrStr, max_pics:int=1000, max_workers:int=8, timeout=4):
"Download images listed in text file `urls` to path `dest`, at most `max_pics`"
urls = open(urls).read().strip().split("\n")[:max_pics]
dest = Path(dest)
dest.mkdir(exist_ok=True)
parallel(partial(_download_image_inner, dest, timeout=timeout), urls, max_workers=max_workers) | [] |
Please provide a description of the function:def resize_to(img, targ_sz:int, use_min:bool=False):
"Size to resize to, to hit `targ_sz` at same aspect ratio, in PIL coords (i.e w*h)"
w,h = img.size
min_sz = (min if use_min else max)(w,h)
ratio = targ_sz/min_sz
return int(w*ratio),int(h*ratio) | [] |
Please provide a description of the function:def verify_image(file:Path, idx:int, delete:bool, max_size:Union[int,Tuple[int,int]]=None, dest:Path=None, n_channels:int=3,
interp=PIL.Image.BILINEAR, ext:str=None, img_format:str=None, resume:bool=False, **kwargs):
"Check if the image in `file` exists, maybe resize it and copy it in `dest`."
try:
# deal with partially broken images as indicated by PIL warnings
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
with open(file, 'rb') as img_file: PIL.Image.open(img_file)
except Warning as w:
if "Possibly corrupt EXIF data" in str(w):
if delete: # green light to modify files
print(f"{file}: Removing corrupt EXIF data")
warnings.simplefilter("ignore")
# save EXIF-cleaned up image, which happens automatically
PIL.Image.open(file).save(file)
else: # keep user's files intact
print(f"{file}: Not removing corrupt EXIF data, pass `delete=True` to do that")
else: warnings.warn(w)
img = PIL.Image.open(file)
imgarr = np.array(img)
img_channels = 1 if len(imgarr.shape) == 2 else imgarr.shape[2]
if (max_size is not None and (img.height > max_size or img.width > max_size)) or img_channels != n_channels:
assert isinstance(dest, Path), "You should provide `dest` Path to save resized image"
dest_fname = dest/file.name
if ext is not None: dest_fname=dest_fname.with_suffix(ext)
if resume and os.path.isfile(dest_fname): return
if max_size is not None:
new_sz = resize_to(img, max_size)
img = img.resize(new_sz, resample=interp)
if n_channels == 3: img = img.convert("RGB")
img.save(dest_fname, img_format, **kwargs)
except Exception as e:
print(f'{e}')
if delete: file.unlink() | [] |
Please provide a description of the function:def verify_images(path:PathOrStr, delete:bool=True, max_workers:int=4, max_size:Union[int]=None, recurse:bool=False,
dest:PathOrStr='.', n_channels:int=3, interp=PIL.Image.BILINEAR, ext:str=None, img_format:str=None,
resume:bool=None, **kwargs):
"Check if the images in `path` aren't broken, maybe resize them and copy it in `dest`."
path = Path(path)
if resume is None and dest == '.': resume=False
dest = path/Path(dest)
os.makedirs(dest, exist_ok=True)
files = get_image_files(path, recurse=recurse)
func = partial(verify_image, delete=delete, max_size=max_size, dest=dest, n_channels=n_channels, interp=interp,
ext=ext, img_format=img_format, resume=resume, **kwargs)
parallel(func, files, max_workers=max_workers) | [] |
Please provide a description of the function:def _ll_pre_transform(self, train_tfm:List[Callable], valid_tfm:List[Callable]):
"Call `train_tfm` and `valid_tfm` after opening image, before converting from `PIL.Image`"
self.train.x.after_open = compose(train_tfm)
self.valid.x.after_open = compose(valid_tfm)
return self | [] |
Please provide a description of the function:def _db_pre_transform(self, train_tfm:List[Callable], valid_tfm:List[Callable]):
"Call `train_tfm` and `valid_tfm` after opening image, before converting from `PIL.Image`"
self.train_ds.x.after_open = compose(train_tfm)
self.valid_ds.x.after_open = compose(valid_tfm)
return self | [] |
Please provide a description of the function:def _presize(self, size:int, val_xtra_size:int=32, scale:Tuple[float]=(0.08, 1.0), ratio:Tuple[float]=(0.75, 4./3.),
interpolation:int=2):
"Resize images to `size` using `RandomResizedCrop`, passing along `kwargs` to train transform"
return self.pre_transform(
tvt.RandomResizedCrop(size, scale=scale, ratio=ratio, interpolation=interpolation),
[tvt.Resize(size+val_xtra_size), tvt.CenterCrop(size)]) | [] |
Please provide a description of the function:def create_from_ll(cls, lls:LabelLists, bs:int=64, val_bs:int=None, ds_tfms:Optional[TfmList]=None,
num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None,
test:Optional[PathOrStr]=None, collate_fn:Callable=data_collate, size:int=None, no_check:bool=False,
resize_method:ResizeMethod=None, mult:int=None, padding_mode:str='reflection',
mode:str='bilinear', tfm_y:bool=False)->'ImageDataBunch':
"Create an `ImageDataBunch` from `LabelLists` `lls` with potential `ds_tfms`."
lls = lls.transform(tfms=ds_tfms, size=size, resize_method=resize_method, mult=mult, padding_mode=padding_mode,
mode=mode, tfm_y=tfm_y)
if test is not None: lls.add_test_folder(test)
return lls.databunch(bs=bs, val_bs=val_bs, dl_tfms=dl_tfms, num_workers=num_workers, collate_fn=collate_fn,
device=device, no_check=no_check) | [] |
Please provide a description of the function:def from_folder(cls, path:PathOrStr, train:PathOrStr='train', valid:PathOrStr='valid',
valid_pct=None, classes:Collection=None, **kwargs:Any)->'ImageDataBunch':
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
path=Path(path)
il = ImageList.from_folder(path)
if valid_pct is None: src = il.split_by_folder(train=train, valid=valid)
else: src = il.split_by_rand_pct(valid_pct)
src = src.label_from_folder(classes=classes)
return cls.create_from_ll(src, **kwargs) | [] |
Please provide a description of the function:def from_df(cls, path:PathOrStr, df:pd.DataFrame, folder:PathOrStr=None, label_delim:str=None, valid_pct:float=0.2,
fn_col:IntsOrStrs=0, label_col:IntsOrStrs=1, suffix:str='', **kwargs:Any)->'ImageDataBunch':
"Create from a `DataFrame` `df`."
src = (ImageList.from_df(df, path=path, folder=folder, suffix=suffix, cols=fn_col)
.split_by_rand_pct(valid_pct)
.label_from_df(label_delim=label_delim, cols=label_col))
return cls.create_from_ll(src, **kwargs) | [] |
Please provide a description of the function:def from_csv(cls, path:PathOrStr, folder:PathOrStr=None, label_delim:str=None, csv_labels:PathOrStr='labels.csv',
valid_pct:float=0.2, fn_col:int=0, label_col:int=1, suffix:str='', delimiter:str=None,
header:Optional[Union[int,str]]='infer', **kwargs:Any)->'ImageDataBunch':
"Create from a csv file in `path/csv_labels`."
path = Path(path)
df = pd.read_csv(path/csv_labels, header=header, delimiter=delimiter)
return cls.from_df(path, df, folder=folder, label_delim=label_delim, valid_pct=valid_pct,
fn_col=fn_col, label_col=label_col, suffix=suffix, **kwargs) | [] |
Please provide a description of the function:def from_lists(cls, path:PathOrStr, fnames:FilePathList, labels:Collection[str], valid_pct:float=0.2,
item_cls:Callable=None, **kwargs):
"Create from list of `fnames` in `path`."
item_cls = ifnone(item_cls, ImageList)
fname2label = {f:l for (f,l) in zip(fnames, labels)}
src = (item_cls(fnames, path=path).split_by_rand_pct(valid_pct)
.label_from_func(lambda x:fname2label[x]))
return cls.create_from_ll(src, **kwargs) | [] |
Please provide a description of the function:def from_name_func(cls, path:PathOrStr, fnames:FilePathList, label_func:Callable, valid_pct:float=0.2, **kwargs):
"Create from list of `fnames` in `path` with `label_func`."
src = ImageList(fnames, path=path).split_by_rand_pct(valid_pct)
return cls.create_from_ll(src.label_from_func(label_func), **kwargs) | [] |
Please provide a description of the function:def from_name_re(cls, path:PathOrStr, fnames:FilePathList, pat:str, valid_pct:float=0.2, **kwargs):
"Create from list of `fnames` in `path` with re expression `pat`."
pat = re.compile(pat)
def _get_label(fn):
if isinstance(fn, Path): fn = fn.as_posix()
res = pat.search(str(fn))
assert res,f'Failed to find "{pat}" in "{fn}"'
return res.group(1)
return cls.from_name_func(path, fnames, _get_label, valid_pct=valid_pct, **kwargs) | [] |
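A hedged usage sketch in the style of the pets example from the library docs (the path and pattern are illustrative):
fnames = get_image_files(path/'images')
pat = r'/([^/]+)_\d+.jpg$'   # group(1) captures the label before the trailing _<n>.jpg
data = ImageDataBunch.from_name_re(path, fnames, pat, ds_tfms=get_transforms(), size=224)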
Please provide a description of the function:def single_from_classes(path:Union[Path, str], classes:Collection[str], ds_tfms:TfmList=None, **kwargs):
"Create an empty `ImageDataBunch` in `path` with `classes`. Typically used for inference."
warn("This method is deprecated and will be removed in a future version, use `load_learner` after `Learner.export()`", DeprecationWarning)
sd = ImageList([], path=path, ignore_empty=True).split_none()
return sd.label_const(0, label_cls=CategoryList, classes=classes).transform(ds_tfms, **kwargs).databunch() | [
"This method is deprecated and will be removed in a future version, use `load_learner` after\n `Learner.export()`"
] |
Please provide a description of the function:def batch_stats(self, funcs:Collection[Callable]=None, ds_type:DatasetType=DatasetType.Train)->Tensor:
"Grab a batch of data and call reduction function `func` per channel"
funcs = ifnone(funcs, [torch.mean,torch.std])
x = self.one_batch(ds_type=ds_type, denorm=False)[0].cpu()
return [func(channel_view(x), 1) for func in funcs] | [] |
Please provide a description of the function:def normalize(self, stats:Collection[Tensor]=None, do_x:bool=True, do_y:bool=False)->None:
"Add normalize transform using `stats` (defaults to `DataBunch.batch_stats`)"
if getattr(self,'norm',False): raise Exception('Can not call normalize twice')
if stats is None: self.stats = self.batch_stats()
else: self.stats = stats
self.norm,self.denorm = normalize_funcs(*self.stats, do_x=do_x, do_y=do_y)
self.add_tfm(self.norm)
return self | [] |
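A hedged usage sketch; passing no stats lets batch_stats() estimate them from a single training batch:
data = data.normalize(imagenet_stats)   # or data.normalize() to estimate from one batch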
Please provide a description of the function:def open(self, fn):
"Open image in `fn`, subclass and overwrite for custom behavior."
return open_image(fn, convert_mode=self.convert_mode, after_open=self.after_open) | [] |
Please provide a description of the function:def from_folder(cls, path:PathOrStr='.', extensions:Collection[str]=None, **kwargs)->ItemList:
"Get the list of files in `path` that have an image suffix. `recurse` determines if we search subfolders."
extensions = ifnone(extensions, image_extensions)
return super().from_folder(path=path, extensions=extensions, **kwargs) | [] |
Please provide a description of the function:def from_df(cls, df:DataFrame, path:PathOrStr, cols:IntsOrStrs=0, folder:PathOrStr=None, suffix:str='', **kwargs)->'ItemList':
"Get the filenames in `cols` of `df` with `folder` in front of them, `suffix` at the end."
suffix = suffix or ''
res = super().from_df(df, path=path, cols=cols, **kwargs)
pref = f'{res.path}{os.path.sep}'
if folder is not None: pref += f'{folder}{os.path.sep}'
res.items = np.char.add(np.char.add(pref, res.items.astype(str)), suffix)
return res | [] |
Please provide a description of the function:def from_csv(cls, path:PathOrStr, csv_name:str, header:str='infer', **kwargs)->'ItemList':
"Get the filenames in `path/csv_name` opened with `header`."
path = Path(path)
df = pd.read_csv(path/csv_name, header=header)
return cls.from_df(df, path=path, **kwargs) | [] |
Please provide a description of the function:def show_xys(self, xs, ys, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
"Show the `xs` (inputs) and `ys` (targets) on a figure of `figsize`."
rows = int(np.ceil(math.sqrt(len(xs))))
axs = subplots(rows, rows, imgsize=imgsize, figsize=figsize)
for x,y,ax in zip(xs, ys, axs.flatten()): x.show(ax=ax, y=y, **kwargs)
for ax in axs.flatten()[len(xs):]: ax.axis('off')
plt.tight_layout() | [] |
Please provide a description of the function:def show_xyzs(self, xs, ys, zs, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
"Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`."
if self._square_show_res:
title = 'Ground truth\nPredictions'
rows = int(np.ceil(math.sqrt(len(xs))))
axs = subplots(rows, rows, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=12)
for x,y,z,ax in zip(xs,ys,zs,axs.flatten()): x.show(ax=ax, title=f'{str(y)}\n{str(z)}', **kwargs)
for ax in axs.flatten()[len(xs):]: ax.axis('off')
else:
title = 'Ground truth/Predictions'
axs = subplots(len(xs), 2, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=14)
for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
x.show(ax=axs[i,0], y=y, **kwargs)
x.show(ax=axs[i,1], y=z, **kwargs) | [] |
Please provide a description of the function:def generate_classes(self, items):
"Generate classes from unique `items` and add `background`."
classes = super().generate_classes([o[1] for o in items])
classes = ['background'] + list(classes)
return classes | [] |
Please provide a description of the function:def show_xys(self, xs, ys, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
"Show the `xs` (inputs) and `ys`(targets) on a figure of `figsize`."
axs = subplots(len(xs), 2, imgsize=imgsize, figsize=figsize)
for i, (x,y) in enumerate(zip(xs,ys)):
x.show(ax=axs[i,0], **kwargs)
y.show(ax=axs[i,1], **kwargs)
plt.tight_layout() | [] |
Please provide a description of the function:def show_xyzs(self, xs, ys, zs, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
"Show `xs` (inputs), `ys` (targets) and `zs` (predictions) on a figure of `figsize`."
title = 'Input / Prediction / Target'
axs = subplots(len(xs), 3, imgsize=imgsize, figsize=figsize, title=title, weight='bold', size=14)
for i,(x,y,z) in enumerate(zip(xs,ys,zs)):
x.show(ax=axs[i,0], **kwargs)
y.show(ax=axs[i,2], **kwargs)
z.show(ax=axs[i,1], **kwargs) | [] |
Please provide a description of the function:def gpu_mem_get(id=None):
"get total, used and free memory (in MBs) for gpu `id`. if `id` is not passed, currently selected torch device is used"
if not use_gpu: return GPUMemory(0, 0, 0)
if id is None: id = torch.cuda.current_device()
try:
handle = pynvml.nvmlDeviceGetHandleByIndex(id)
info = pynvml.nvmlDeviceGetMemoryInfo(handle)
return GPUMemory(*(map(b2mb, [info.total, info.free, info.used])))
except:
return GPUMemory(0, 0, 0) | [] |
Please provide a description of the function:def gpu_with_max_free_mem():
"get [gpu_id, its_free_ram] for the first gpu with highest available RAM"
mem_all = gpu_mem_get_all()
if not len(mem_all): return None, 0
free_all = np.array([x.free for x in mem_all])
id = np.argmax(free_all)
return id, free_all[id] | [] |
Please provide a description of the function:def gpu_mem_trace(func):
"A decorator that runs `GPUMemTrace` w/ report on func"
@functools.wraps(func)
def wrapper(*args, **kwargs):
with GPUMemTrace(ctx=func.__qualname__, on_exit_report=True):
return func(*args, **kwargs)
return wrapper | [] |
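A hedged usage sketch of the decorator (the decorated function is hypothetical):
@gpu_mem_trace
def fit_once(learn):
    learn.fit(1)   # GPU memory delta/peak is reported when this returns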
Please provide a description of the function:def reduce_mem_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
#NB: originally `df.columns.drop('index')`; the drop was removed while debugging
columns = df.columns
for col in columns:
col_type = df[col].dtype
if str(col_type) != 'category' and col_type != 'datetime64[ns]' and col_type != bool:
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
#if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
#df[col] = df[col].astype(np.float16)
#float16 sometimes causes an error, so it was removed
if c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
print('Error: '+col+' value would be a float64. Disregarding.')
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df | [
" iterate through all the columns of a dataframe and modify the data type\n to reduce memory usage.\n "
] |
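A hedged usage sketch on a toy frame: the int64 column fits in int16 and the float64 column is safely narrowed to float32:
import numpy as np, pandas as pd
df = pd.DataFrame({'small_int': np.arange(1000), 'ratio': np.random.rand(1000)})
df = reduce_mem_usage(df)
print(df.dtypes)   # small_int -> int16, ratio -> float32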
Please provide a description of the function:def _get_ctx(self, subctx=None):
"Return ' (ctx: subctx)' or ' (ctx)' or ' (subctx)' or '' depending on this and constructor arguments"
l = []
if self.ctx is not None: l.append(self.ctx)
if subctx is not None: l.append(subctx)
return '' if len(l) == 0 else f" ({': '.join(l)})" | [] |
Please provide a description of the function:def _learner_distributed(learn:Learner, cuda_id:int, cache_dir:PathOrStr='tmp'):
"Put `learn` on distributed training with `cuda_id`."
learn.callbacks.append(DistributedTrainer(learn, cuda_id))
learn.callbacks.append(DistributedRecorder(learn, cuda_id, cache_dir))
return learn | [] |
Please provide a description of the function:def xresnet18(pretrained=False, **kwargs):
model = XResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['xresnet18']))
return model | [
"Constructs a XResNet-18 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n "
] |
Please provide a description of the function:def xresnet50_2(pretrained=False, **kwargs):
model = XResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['xresnet50']))
return model | [
"Constructs a XResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n "
] |
Please provide a description of the function:def loss_batch(model:nn.Module, xb:Tensor, yb:Tensor, loss_func:OptLossFunc=None, opt:OptOptimizer=None,
cb_handler:Optional[CallbackHandler]=None)->Tuple[Union[Tensor,int,float,str]]:
"Calculate loss and metrics for a batch, call out to callbacks as necessary."
cb_handler = ifnone(cb_handler, CallbackHandler())
if not is_listy(xb): xb = [xb]
if not is_listy(yb): yb = [yb]
out = model(*xb)
out = cb_handler.on_loss_begin(out)
if not loss_func: return to_detach(out), yb[0].detach()
loss = loss_func(out, *yb)
if opt is not None:
loss,skip_bwd = cb_handler.on_backward_begin(loss)
if not skip_bwd: loss.backward()
if not cb_handler.on_backward_end(): opt.step()
if not cb_handler.on_step_end(): opt.zero_grad()
return loss.detach().cpu() | [] |
Please provide a description of the function:def get_preds(model:nn.Module, dl:DataLoader, pbar:Optional[PBar]=None, cb_handler:Optional[CallbackHandler]=None,
activ:nn.Module=None, loss_func:OptLossFunc=None, n_batch:Optional[int]=None) -> List[Tensor]:
"Tuple of predictions and targets, and optional losses (if `loss_func`) using `dl`, max batches `n_batch`."
res = [torch.cat(o).cpu() for o in
zip(*validate(model, dl, cb_handler=cb_handler, pbar=pbar, average=False, n_batch=n_batch))]
if loss_func is not None:
with NoneReduceOnCPU(loss_func) as lf: res.append(lf(res[0], res[1]))
if activ is not None: res[0] = activ(res[0])
return res | [] |
Please provide a description of the function:def validate(model:nn.Module, dl:DataLoader, loss_func:OptLossFunc=None, cb_handler:Optional[CallbackHandler]=None,
pbar:Optional[PBar]=None, average=True, n_batch:Optional[int]=None)->Iterator[Tuple[Union[Tensor,int],...]]:
"Calculate `loss_func` of `model` on `dl` in evaluation mode."
model.eval()
with torch.no_grad():
val_losses,nums = [],[]
if cb_handler: cb_handler.set_dl(dl)
for xb,yb in progress_bar(dl, parent=pbar, leave=(pbar is not None)):
if cb_handler: xb, yb = cb_handler.on_batch_begin(xb, yb, train=False)
val_loss = loss_batch(model, xb, yb, loss_func, cb_handler=cb_handler)
val_losses.append(val_loss)
if not is_listy(yb): yb = [yb]
nums.append(yb[0].shape[0])
if cb_handler and cb_handler.on_batch_end(val_losses[-1]): break
if n_batch and (len(nums)>=n_batch): break
nums = np.array(nums, dtype=np.float32)
if average: return (to_np(torch.stack(val_losses)) * nums).sum() / nums.sum()
else: return val_losses | [] |
Please provide a description of the function:def train_epoch(model:nn.Module, dl:DataLoader, opt:optim.Optimizer, loss_func:LossFunction)->None:
"Simple training of `model` for 1 epoch of `dl` using optim `opt` and loss function `loss_func`."
model.train()
for xb,yb in dl:
loss = loss_func(model(xb), yb)
loss.backward()
opt.step()
opt.zero_grad() | [] |
Please provide a description of the function:def fit(epochs:int, learn:BasicLearner, callbacks:Optional[CallbackList]=None, metrics:OptMetrics=None)->None:
"Fit the `model` on `data` and learn using `loss_func` and `opt`."
assert len(learn.data.train_dl) != 0, f"""Your training dataloader is empty, can't train a model.
Use a smaller batch size (batch size={learn.data.train_dl.batch_size} for {len(learn.data.train_dl.dataset)} elements)."""
cb_handler = CallbackHandler(callbacks, metrics)
pbar = master_bar(range(epochs))
cb_handler.on_train_begin(epochs, pbar=pbar, metrics=metrics)
exception=False
try:
for epoch in pbar:
learn.model.train()
cb_handler.set_dl(learn.data.train_dl)
cb_handler.on_epoch_begin()
for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
xb, yb = cb_handler.on_batch_begin(xb, yb)
loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
if cb_handler.on_batch_end(loss): break
if not cb_handler.skip_validate and not learn.data.empty_val:
val_loss = validate(learn.model, learn.data.valid_dl, loss_func=learn.loss_func,
cb_handler=cb_handler, pbar=pbar)
else: val_loss=None
if cb_handler.on_epoch_end(val_loss): break
except Exception as e:
exception = e
raise
finally: cb_handler.on_train_end(exception) | [
"Your training dataloader is empty, can't train a model.\n Use a smaller batch size (batch size={learn.data.train_dl.batch_size} for {len(learn.data.train_dl.dataset)} elements)."
] |
Please provide a description of the function:def load_learner(path:PathOrStr, file:PathLikeOrBinaryStream='export.pkl', test:ItemList=None, **db_kwargs):
"Load a `Learner` object saved with `export_state` in `path/file` with empty data, optionally add `test` and load on `cpu`. `file` can be file-like (file or buffer)"
source = Path(path)/file if is_pathlike(file) else file
state = torch.load(source, map_location='cpu') if defaults.device == torch.device('cpu') else torch.load(source)
model = state.pop('model')
src = LabelLists.load_state(path, state.pop('data'))
if test is not None: src.add_test(test)
data = src.databunch(**db_kwargs)
cb_state = state.pop('cb_state')
clas_func = state.pop('cls')
res = clas_func(data, model, **state)
res.callback_fns = state['callback_fns'] #to avoid duplicates
res.callbacks = [load_callback(c,s, res) for c,s in cb_state.items()]
return res | [] |
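A hedged usage sketch of the export/load round trip (img is a hypothetical test image):
learn.export()                         # writes export.pkl under learn.path
learn_inf = load_learner(learn.path)   # inference-ready Learner with empty data
pred_class, pred_idx, probs = learn_inf.predict(img)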
Please provide a description of the function:def on_train_begin(self, pbar:PBar, metrics_names:Collection[str], **kwargs:Any)->None:
"Initialize recording status at beginning of training."
self.pbar = pbar
self.names = ['epoch', 'train_loss'] if self.no_val else ['epoch', 'train_loss', 'valid_loss']
self.metrics_names = metrics_names
self.names += self.metrics_names
if hasattr(self, '_added_met_names'): self.names += self._added_met_names
if self.add_time: self.names.append('time')
if not self.silent: self.pbar.write(self.names, table=True)
self.losses,self.val_losses,self.lrs,self.moms,self.metrics,self.nb_batches = [],[],[],[],[],[] | [] |
Please provide a description of the function:def on_batch_begin(self, train, **kwargs:Any)->None:
"Record learning rate and momentum at beginning of batch."
if train:
self.lrs.append(self.opt.lr)
self.moms.append(self.opt.mom) | [] |
Please provide a description of the function:def on_backward_begin(self, smooth_loss:Tensor, **kwargs:Any)->None:
"Record the loss before any other callback has a chance to modify it."
self.losses.append(smooth_loss)
if self.pbar is not None and hasattr(self.pbar,'child'):
self.pbar.child.comment = f'{smooth_loss:.4f}' | [] |
Please provide a description of the function:def on_epoch_end(self, epoch:int, num_batch:int, smooth_loss:Tensor,
last_metrics:MetricsList=None, **kwargs:Any)->bool:
"Save epoch info: num_batch, smooth_loss, metrics."
self.nb_batches.append(num_batch)
if last_metrics is not None: self.val_losses.append(last_metrics[0])
else: last_metrics = [] if self.no_val else [None]
if len(last_metrics) > 1: self.metrics.append(last_metrics[1:])
self.format_stats([epoch, smooth_loss] + last_metrics) | [] |
Please provide a description of the function:def format_stats(self, stats:TensorOrNumList)->None:
"Format stats before printing."
str_stats = []
for name,stat in zip(self.names,stats):
str_stats.append('#na#' if stat is None else str(stat) if isinstance(stat, int) else f'{stat:.6f}')
if self.add_time: str_stats.append(format_time(time() - self.start_epoch))
if not self.silent: self.pbar.write(str_stats, table=True) | [] |
Please provide a description of the function:def add_metric_names(self, names):
"Add `names` to the inner metric names."
if hasattr(self, '_added_met_names'): self._added_met_names += names
else: self._added_met_names = names | [] |
Please provide a description of the function:def plot_lr(self, show_moms=False, skip_start:int=0, skip_end:int=0, return_fig:bool=None)->Optional[plt.Figure]:
"Plot learning rate, `show_moms` to include momentum."
lrs = self._split_list(self.lrs, skip_start, skip_end)
iterations = self._split_list(range_of(self.lrs), skip_start, skip_end)
if show_moms:
moms = self._split_list(self.moms, skip_start, skip_end)
fig, axs = plt.subplots(1,2, figsize=(12,4))
axs[0].plot(iterations, lrs)
axs[0].set_xlabel('Iterations')
axs[0].set_ylabel('Learning Rate')
axs[1].plot(iterations, moms)
axs[1].set_xlabel('Iterations')
axs[1].set_ylabel('Momentum')
else:
fig, ax = plt.subplots()
ax.plot(iterations, lrs)
ax.set_xlabel('Iterations')
ax.set_ylabel('Learning Rate')
if ifnone(return_fig, defaults.return_fig): return fig
if not IN_NOTEBOOK: plot_sixel(fig) | [] |
Please provide a description of the function:def plot(self, skip_start:int=10, skip_end:int=5, suggestion:bool=False, return_fig:bool=None,
**kwargs)->Optional[plt.Figure]:
"Plot learning rate and losses, trimmed between `skip_start` and `skip_end`. Optionally plot and return min gradient"
lrs = self._split_list(self.lrs, skip_start, skip_end)
losses = self._split_list(self.losses, skip_start, skip_end)
losses = [x.item() for x in losses]
if 'k' in kwargs: losses = self.smoothen_by_spline(lrs, losses, **kwargs)
fig, ax = plt.subplots(1,1)
ax.plot(lrs, losses)
ax.set_ylabel("Loss")
ax.set_xlabel("Learning Rate")
ax.set_xscale('log')
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0e'))
if suggestion:
try: mg = (np.gradient(np.array(losses))).argmin()
except:
print("Failed to compute the gradients, there might not be enough points.")
return
print(f"Min numerical gradient: {lrs[mg]:.2E}")
ax.plot(lrs[mg],losses[mg],markersize=10,marker='o',color='red')
self.min_grad_lr = lrs[mg]
if ifnone(return_fig, defaults.return_fig): return fig
if not IN_NOTEBOOK: plot_sixel(fig) | [] |
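A hedged usage sketch of the LR-finder workflow that feeds this plot:
learn.lr_find()
learn.recorder.plot(suggestion=True)   # marks the point of steepest loss descent
lr = learn.recorder.min_grad_lr        # set after plotting with suggestion=True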
Please provide a description of the function:def plot_losses(self, skip_start:int=0, skip_end:int=0, return_fig:bool=None)->Optional[plt.Figure]:
"Plot training and validation losses."
fig, ax = plt.subplots(1,1)
losses = self._split_list(self.losses, skip_start, skip_end)
iterations = self._split_list(range_of(self.losses), skip_start, skip_end)
ax.plot(iterations, losses, label='Train')
val_iter = self._split_list_val(np.cumsum(self.nb_batches), skip_start, skip_end)
val_losses = self._split_list_val(self.val_losses, skip_start, skip_end)
ax.plot(val_iter, val_losses, label='Validation')
ax.set_ylabel('Loss')
ax.set_xlabel('Batches processed')
ax.legend()
if ifnone(return_fig, defaults.return_fig): return fig
if not IN_NOTEBOOK: plot_sixel(fig) | [] |
Please provide a description of the function:def plot_metrics(self, skip_start:int=0, skip_end:int=0, return_fig:bool=None)->Optional[plt.Figure]:
"Plot metrics collected during training."
assert len(self.metrics) != 0, "There are no metrics to plot."
fig, axes = plt.subplots(len(self.metrics[0]),1,figsize=(6, 4*len(self.metrics[0])))
val_iter = self._split_list_val(np.cumsum(self.nb_batches), skip_start, skip_end)
axes = axes.flatten() if len(self.metrics[0]) != 1 else [axes]
for i, ax in enumerate(axes):
values = [met[i] for met in self.metrics]
values = self._split_list_val(values, skip_start, skip_end)
ax.plot(val_iter, values)
ax.set_ylabel(str(self.metrics_names[i]))
ax.set_xlabel('Batches processed')
if ifnone(return_fig, defaults.return_fig): return fig
if not IN_NOTEBOOK: plot_sixel(fig) | [] |