Columns: Code (string, lengths 103-85.9k); Summary (sequence, lengths 0-94)
Please provide a description of the function:
def join_paths(fnames:FilePathList, path:PathOrStr='.')->Collection[Path]:
    "Join `path` to every file name in `fnames`."
    path = Path(path)
    return [join_path(o,path) for o in fnames]
[]
Please provide a description of the function:
def loadtxt_str(path:PathOrStr)->np.ndarray:
    "Return `ndarray` of `str` of lines of text from `path`."
    with open(path, 'r') as f: lines = f.readlines()
    return np.array([l.strip() for l in lines])
[]
Please provide a description of the function:
def save_texts(fname:PathOrStr, texts:Collection[str]):
    "Save in `fname` the content of `texts`."
    with open(fname, 'w') as f:
        for t in texts: f.write(f'{t}\n')
[]
Please provide a description of the function:
def df_names_to_idx(names:IntsOrStrs, df:DataFrame):
    "Return the column indexes of `names` in `df`."
    if not is_listy(names): names = [names]
    if isinstance(names[0], int): return names
    return [df.columns.get_loc(c) for c in names]
[]
Please provide a description of the function:
def one_hot(x:Collection[int], c:int):
    "One-hot encode `x` with `c` classes."
    res = np.zeros((c,), np.float32)
    res[listify(x)] = 1.
    return res
[]
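A minimal usage sketch (illustrative, not part of the dataset row), assuming `one_hot` above and fastai's `listify` helper are in scope:

    import numpy as np

    labels = [0, 3]               # the active classes for one example
    vec = one_hot(labels, c=5)    # np.float32 vector of length 5
    assert vec.tolist() == [1., 0., 0., 1., 0.]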
Please provide a description of the function:
def index_row(a:Union[Collection,pd.DataFrame,pd.Series], idxs:Collection[int])->Any:
    "Return the slice of `a` corresponding to `idxs`."
    if a is None: return a
    if isinstance(a,(pd.DataFrame,pd.Series)):
        res = a.iloc[idxs]
        if isinstance(res,(pd.DataFrame,pd.Series)): return res.copy()
        return res
    return a[idxs]
[]
Please provide a description of the function:
def func_args(func)->Collection[str]:
    "Return the arguments of `func`."
    code = func.__code__
    return code.co_varnames[:code.co_argcount]
[]
Please provide a description of the function:
def split_kwargs_by_func(kwargs, func):
    "Split `kwargs` between those expected by `func` and the others."
    args = func_args(func)
    func_kwargs = {a:kwargs.pop(a) for a in args if a in kwargs}
    return func_kwargs, kwargs
[]
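A hedged illustration of the split; `fit` below is a made-up callee, and note that `kwargs` itself is mutated because matching keys are removed with `pop`:

    def fit(lr, epochs): pass      # hypothetical target function

    kwargs = {'lr': 1e-3, 'epochs': 3, 'verbose': True}
    fit_kwargs, rest = split_kwargs_by_func(kwargs, fit)
    assert fit_kwargs == {'lr': 1e-3, 'epochs': 3} and rest == {'verbose': True}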
Please provide a description of the function:
def array(a, dtype:type=None, **kwargs)->np.ndarray:
    "Same as `np.array` but also handles generators. `kwargs` are passed to `np.array` with `dtype`."
    if not isinstance(a, collections.Sized) and not getattr(a,'__array_interface__',False):
        a = list(a)
    if np.int_==np.int32 and dtype is None and is_listy(a) and len(a) and isinstance(a[0],int):
        dtype=np.int64
    return np.array(a, dtype=dtype, **kwargs)
[]
Please provide a description of the function:
def text2html_table(items:Collection[Collection[str]])->str:
    "Put the texts in `items` in an HTML table."
    html_code = f"""<table border="1" class="dataframe">"""
    html_code += f"""  <thead>\n    <tr style="text-align: right;">\n"""
    for i in items[0]: html_code += f"      <th>{_treat_html(i)}</th>"
    html_code += f"    </tr>\n  </thead>\n  <tbody>"
    html_code += "  <tbody>"
    for line in items[1:]:
        html_code += "    <tr>"
        for i in line: html_code += f"      <td>{_treat_html(i)}</td>"
        html_code += "    </tr>"
    html_code += "  </tbody>\n</table>"
    return html_code
[]
Please provide a description of the function:
def parallel(func, arr:Collection, max_workers:int=None):
    "Call `func` on every element of `arr` in parallel using `max_workers`."
    max_workers = ifnone(max_workers, defaults.cpus)
    if max_workers<2:
        results = [func(o,i) for i,o in progress_bar(enumerate(arr), total=len(arr))]
    else:
        with ProcessPoolExecutor(max_workers=max_workers) as ex:
            futures = [ex.submit(func,o,i) for i,o in enumerate(arr)]
            results = []
            for f in progress_bar(concurrent.futures.as_completed(futures), total=len(arr)):
                results.append(f.result())
    if any([o is not None for o in results]): return results
[]
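Usage sketch, assuming fastai's helpers (`ifnone`, `defaults`, `progress_bar`) are importable. The worker must accept `(element, index)` and be picklable (defined at module top level); with more than one worker, results come back in completion order rather than input order, because of `as_completed`:

    def square(o, i): return o * o   # top-level so ProcessPoolExecutor can pickle it

    results = parallel(square, [1, 2, 3, 4], max_workers=2)
    assert sorted(results) == [1, 4, 9, 16]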
Please provide a description of the function:
def subplots(rows:int, cols:int, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, title=None, **kwargs):
    "Like `plt.subplots` but with consistent axs shape, `kwargs` passed to `fig.suptitle` with `title`."
    figsize = ifnone(figsize, (imgsize*cols, imgsize*rows))
    fig, axs = plt.subplots(rows,cols,figsize=figsize)
    if rows==cols==1: axs = [[axs]] # subplots(1,1) returns Axes, not [Axes]
    elif (rows==1 and cols!=1) or (cols==1 and rows!=1): axs = [axs]
    if title is not None: fig.suptitle(title, **kwargs)
    return array(axs)
[]
Please provide a description of the function:
def show_some(items:Collection, n_max:int=5, sep:str=','):
    "Return the representation of the first `n_max` elements in `items`."
    if items is None or len(items) == 0: return ''
    res = sep.join([f'{o}' for o in items[:n_max]])
    if len(items) > n_max: res += '...'
    return res
[]
Please provide a description of the function:
def get_tmp_file(dir=None):
    "Create and return a tmp filename, optionally at a specific path. `os.remove` when done with it."
    with tempfile.NamedTemporaryFile(delete=False, dir=dir) as f: return f.name
[]
Please provide a description of the function:
def compose(funcs:List[Callable])->Callable:
    "Compose `funcs`."
    def compose_(funcs, x, *args, **kwargs):
        for f in listify(funcs): x = f(x, *args, **kwargs)
        return x
    return partial(compose_, funcs)
[]
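A quick usage sketch: the returned callable pipes its input through each function in order.

    add1 = lambda x: x + 1
    dbl  = lambda x: x * 2
    f = compose([add1, dbl])
    assert f(3) == 8   # dbl(add1(3))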
Please provide a description of the function:
def show(self, ax:plt.Axes, **kwargs):
    "Subclass this method if you want to customize the way this `ItemBase` is shown on `ax`."
    ax.set_title(str(self))
[]
Please provide a description of the function:
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None: init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None: init.constant_(m.bias, 0)
[]
Please provide a description of the function:
def conv_bn_lrelu(ni:int, nf:int, ks:int=3, stride:int=1)->nn.Sequential:
    "Create a sequence of Conv2d->BatchNorm2d->LeakyReLU layers."
    return nn.Sequential(
        nn.Conv2d(ni, nf, kernel_size=ks, bias=False, stride=stride, padding=ks//2),
        nn.BatchNorm2d(nf),
        nn.LeakyReLU(negative_slope=0.1, inplace=True))
[]
Please provide a description of the function:
def make_group_layer(self, ch_in:int, num_blocks:int, stride:int=1):
    "Starts with a conv layer (`ch_in` channels in), then has `num_blocks` `ResLayer`s."
    return [conv_bn_lrelu(ch_in, ch_in*2, stride=stride)
           ] + [(ResLayer(ch_in*2)) for i in range(num_blocks)]
[]
Please provide a description of the function:
def collab_learner(data, n_factors:int=None, use_nn:bool=False, emb_szs:Dict[str,int]=None,
                   layers:Collection[int]=None, ps:Collection[float]=None, emb_drop:float=0.,
                   y_range:OptRange=None, use_bn:bool=True, bn_final:bool=False, **learn_kwargs)->Learner:
    "Create a Learner for collaborative filtering on `data`."
    emb_szs = data.get_emb_szs(ifnone(emb_szs, {}))
    u,m = data.train_ds.x.classes.values()
    if use_nn:
        model = EmbeddingNN(emb_szs=emb_szs, layers=layers, ps=ps, emb_drop=emb_drop,
                            y_range=y_range, use_bn=use_bn, bn_final=bn_final, **learn_kwargs)
    else: model = EmbeddingDotBias(n_factors, len(u), len(m), y_range=y_range)
    return CollabLearner(data, model, **learn_kwargs)
[]
Please provide a description of the function:
def from_df(cls, ratings:DataFrame, valid_pct:float=0.2, user_name:Optional[str]=None,
            item_name:Optional[str]=None, rating_name:Optional[str]=None, test:DataFrame=None,
            seed:int=None, path:PathOrStr='.', bs:int=64, val_bs:int=None,
            num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None,
            device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False) -> 'CollabDataBunch':
    "Create a `DataBunch` suitable for collaborative filtering from `ratings`."
    user_name   = ifnone(user_name,   ratings.columns[0])
    item_name   = ifnone(item_name,   ratings.columns[1])
    rating_name = ifnone(rating_name, ratings.columns[2])
    cat_names = [user_name,item_name]
    src = (CollabList.from_df(ratings, cat_names=cat_names, procs=Categorify)
           .split_by_rand_pct(valid_pct=valid_pct, seed=seed).label_from_df(cols=rating_name))
    if test is not None: src.add_test(CollabList.from_df(test, cat_names=cat_names))
    return src.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers,
                         device=device, collate_fn=collate_fn, no_check=no_check)
[]
Please provide a description of the function:
def get_idx(self, arr:Collection, is_item:bool=True):
    "Fetch item or user (based on `is_item`) for all in `arr`. (Set model to `cpu` and no grad.)"
    m = self.model.eval().cpu()
    requires_grad(m,False)
    u_class,i_class = self.data.train_ds.x.classes.values()
    classes = i_class if is_item else u_class
    c2i = {v:k for k,v in enumerate(classes)}
    try: return tensor([c2i[o] for o in arr])
    except Exception as e:
        print(f"""You're trying to access {'an item' if is_item else 'a user'} that isn't in the training data.
              If it was in your original data, it may have been split such that it's only in the validation set now.""")
[]
Please provide a description of the function:
def bias(self, arr:Collection, is_item:bool=True):
    "Bias for item or user (based on `is_item`) for all in `arr`. (Set model to `cpu` and no grad.)"
    idx = self.get_idx(arr, is_item)
    m = self.model
    layer = m.i_bias if is_item else m.u_bias
    return layer(idx).squeeze()
[]
Please provide a description of the function:
def weight(self, arr:Collection, is_item:bool=True):
    "Weight for item or user (based on `is_item`) for all in `arr`. (Set model to `cpu` and no grad.)"
    idx = self.get_idx(arr, is_item)
    m = self.model
    layer = m.i_weight if is_item else m.u_weight
    return layer(idx)
[]
Please provide a description of the function:
def draw_tree(t, df, size=10, ratio=0.6, precision=0):
    s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,
                      special_characters=True, rotate=True, precision=precision)
    IPython.display.display(graphviz.Source(re.sub('Tree {',
       f'Tree {{ size={size}; ratio={ratio}', s)))
[ " Draws a representation of a random forest in IPython.\n Parameters:\n -----------\n t: The tree you wish to draw\n df: The data used to train the tree. This is used to get the names of the features.\n " ]
Please provide a description of the function:
def get_sample(df,n):
    idxs = sorted(np.random.permutation(len(df))[:n])
    return df.iloc[idxs].copy()
[ " Gets a random sample of n rows from df, without replacement.\n Parameters:\n -----------\n df: A pandas data frame, that you wish to sample from.\n n: The number of rows you wish to sample.\n Returns:\n --------\n return value: A random sample of n rows of df.\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n >>> get_sample(df, 2)\n col1 col2\n 1 2 b\n 2 3 a\n " ]
Please provide a description of the function:
def add_datepart(df, fldname, drop=True, time=False, errors="raise"):
    fld = df[fldname]
    fld_dtype = fld.dtype
    if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
        fld_dtype = np.datetime64
    if not np.issubdtype(fld_dtype, np.datetime64):
        df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)
    targ_pre = re.sub('[Dd]ate$', '', fldname)
    attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
            'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start',
            'Is_year_end', 'Is_year_start']
    if time: attr = attr + ['Hour', 'Minute', 'Second']
    for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
    df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
    if drop: df.drop(fldname, axis=1, inplace=True)
[ "add_datepart converts a column of df from a datetime64 to many columns containing\n the information from the date. This applies changes inplace.\n Parameters:\n -----------\n df: A pandas data frame. df gain several new columns.\n fldname: A string that is the name of the date column you wish to expand.\n If it is not a datetime64 series, it will be converted to one with pd.to_datetime.\n drop: If true then the original date column will be removed.\n time: If true time features: Hour, Minute, Second will be added.\n Examples:\n ---------\n >>> df = pd.DataFrame({ 'A' : pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'], infer_datetime_format=False) })\n >>> df\n A\n 0 2000-03-11\n 1 2000-03-12\n 2 2000-03-13\n >>> add_datepart(df, 'A')\n >>> df\n AYear AMonth AWeek ADay ADayofweek ADayofyear AIs_month_end AIs_month_start AIs_quarter_end AIs_quarter_start AIs_year_end AIs_year_start AElapsed\n 0 2000 3 10 11 5 71 False False False False False False 952732800\n 1 2000 3 10 12 6 72 False False False False False False 952819200\n 2 2000 3 11 13 0 73 False False False False False False 952905600\n " ]
Please provide a description of the function:
def train_cats(df):
    for n,c in df.items():
        if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()
[ "Change any columns of strings in a panda's dataframe to a column of\n categorical values. This applies the changes inplace.\n Parameters:\n -----------\n df: A pandas dataframe. Any columns of strings will be changed to\n categorical values.\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n note the type of col2 is string\n >>> train_cats(df)\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n now the type of col2 is category\n " ]
Please provide a description of the function:
def apply_cats(df, trn):
    for n,c in df.items():
        if (n in trn.columns) and (trn[n].dtype.name=='category'):
            df[n] = c.astype('category').cat.as_ordered()
            df[n].cat.set_categories(trn[n].cat.categories, ordered=True, inplace=True)
[ "Changes any columns of strings in df into categorical variables using trn as\n a template for the category codes.\n Parameters:\n -----------\n df: A pandas dataframe. Any columns of strings will be changed to\n categorical values. The category codes are determined by trn.\n trn: A pandas dataframe. When creating a category for df, it looks up the\n what the category's code were in trn and makes those the category codes\n for df.\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n note the type of col2 is string\n >>> train_cats(df)\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n now the type of col2 is category {a : 1, b : 2}\n >>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})\n >>> apply_cats(df2, df)\n col1 col2\n 0 1 b\n 1 2 a\n 2 3 a\n now the type of col is category {a : 1, b : 2}\n " ]
Please provide a description of the function:
def fix_missing(df, col, name, na_dict):
    if is_numeric_dtype(col):
        if pd.isnull(col).sum() or (name in na_dict):
            df[name+'_na'] = pd.isnull(col)
            filler = na_dict[name] if name in na_dict else col.median()
            df[name] = col.fillna(filler)
            na_dict[name] = filler
    return na_dict
[ " Fill missing data in a column of df with the median, and add a {name}_na column\n which specifies if the data was missing.\n Parameters:\n -----------\n df: The data frame that will be changed.\n col: The column of data to fix by filling in missing data.\n name: The name of the new filled column in df.\n na_dict: A dictionary of values to create na's of and the value to insert. If\n name is not a key of na_dict the median will fill any missing data. Also\n if name is not a key of na_dict and there is no missing data in col, then\n no {name}_na column is not created.\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n >>> fix_missing(df, df['col1'], 'col1', {})\n >>> df\n col1 col2 col1_na\n 0 1 5 False\n 1 2 2 True\n 2 3 2 False\n >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n >>> fix_missing(df, df['col2'], 'col2', {})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})\n >>> df\n col1 col2\n 0 1 5\n 1 nan 2\n 2 3 2\n >>> fix_missing(df, df['col1'], 'col1', {'col1' : 500})\n >>> df\n col1 col2 col1_na\n 0 1 5 False\n 1 500 2 True\n 2 3 2 False\n " ]
Please provide a description of the function:
def numericalize(df, col, name, max_n_cat):
    if not is_numeric_dtype(col) and (max_n_cat is None or len(col.cat.categories)>max_n_cat):
        df[name] = pd.Categorical(col).codes+1
[ " Changes the column col from a categorical type to it's integer codes.\n Parameters:\n -----------\n df: A pandas dataframe. df[name] will be filled with the integer codes from\n col.\n col: The column you wish to change into the categories.\n name: The column name you wish to insert into df. This column will hold the\n integer codes.\n max_n_cat: If col has more categories than max_n_cat it will not change the\n it to its integer codes. If max_n_cat is None, then col will always be\n converted.\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n note the type of col2 is string\n >>> train_cats(df)\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n now the type of col2 is category { a : 1, b : 2}\n >>> numericalize(df, df['col2'], 'col3', None)\n col1 col2 col3\n 0 1 a 1\n 1 2 b 2\n 2 3 a 1\n " ]
Please provide a description of the function:
def proc_df(df, y_fld=None, skip_flds=None, ignore_flds=None, do_scale=False, na_dict=None,
            preproc_fn=None, max_n_cat=None, subset=None, mapper=None):
    if not ignore_flds: ignore_flds=[]
    if not skip_flds: skip_flds=[]
    if subset: df = get_sample(df,subset)
    else: df = df.copy()
    ignored_flds = df.loc[:, ignore_flds]
    df.drop(ignore_flds, axis=1, inplace=True)
    if preproc_fn: preproc_fn(df)
    if y_fld is None: y = None
    else:
        if not is_numeric_dtype(df[y_fld]): df[y_fld] = pd.Categorical(df[y_fld]).codes
        y = df[y_fld].values
        skip_flds += [y_fld]
    df.drop(skip_flds, axis=1, inplace=True)
    if na_dict is None: na_dict = {}
    else: na_dict = na_dict.copy()
    na_dict_initial = na_dict.copy()
    for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)
    if len(na_dict_initial.keys()) > 0:
        df.drop([a + '_na' for a in list(set(na_dict.keys()) - set(na_dict_initial.keys()))],
                axis=1, inplace=True)
    if do_scale: mapper = scale_vars(df, mapper)
    for n,c in df.items(): numericalize(df, c, n, max_n_cat)
    df = pd.get_dummies(df, dummy_na=True)
    df = pd.concat([ignored_flds, df], axis=1)
    res = [df, y, na_dict]
    if do_scale: res = res + [mapper]
    return res
[ " proc_df takes a data frame df and splits off the response variable, and\n changes the df into an entirely numeric dataframe. For each column of df \n which is not in skip_flds nor in ignore_flds, na values are replaced by the\n median value of the column.\n Parameters:\n -----------\n df: The data frame you wish to process.\n y_fld: The name of the response variable\n skip_flds: A list of fields that dropped from df.\n ignore_flds: A list of fields that are ignored during processing.\n do_scale: Standardizes each column in df. Takes Boolean Values(True,False)\n na_dict: a dictionary of na columns to add. Na columns are also added if there\n are any missing values.\n preproc_fn: A function that gets applied to df.\n max_n_cat: The maximum number of categories to break into dummy values, instead\n of integer codes.\n subset: Takes a random subset of size subset from df.\n mapper: If do_scale is set as True, the mapper variable\n calculates the values used for scaling of variables during training time (mean and standard deviation).\n Returns:\n --------\n [x, y, nas, mapper(optional)]:\n x: x is the transformed version of df. x will not have the response variable\n and is entirely numeric.\n y: y is the response variable\n nas: returns a dictionary of which nas it created, and the associated median.\n mapper: A DataFrameMapper which stores the mean and standard deviation of the corresponding continuous\n variables which is then used for scaling of during test-time.\n Examples:\n ---------\n >>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n note the type of col2 is string\n >>> train_cats(df)\n >>> df\n col1 col2\n 0 1 a\n 1 2 b\n 2 3 a\n now the type of col2 is category { a : 1, b : 2}\n >>> x, y, nas = proc_df(df, 'col1')\n >>> x\n col2\n 0 1\n 1 2\n 2 1\n >>> data = DataFrame(pet=[\"cat\", \"dog\", \"dog\", \"fish\", \"cat\", \"dog\", \"cat\", \"fish\"],\n children=[4., 6, 3, 3, 2, 3, 5, 4],\n salary=[90, 24, 44, 27, 32, 59, 36, 27])\n >>> mapper = DataFrameMapper([(:pet, LabelBinarizer()),\n ([:children], StandardScaler())])\n >>>round(fit_transform!(mapper, copy(data)), 2)\n 8x4 Array{Float64,2}:\n 1.0 0.0 0.0 0.21\n 0.0 1.0 0.0 1.88\n 0.0 1.0 0.0 -0.63\n 0.0 0.0 1.0 -0.63\n 1.0 0.0 0.0 -1.46\n 0.0 1.0 0.0 -0.63\n 1.0 0.0 0.0 1.04\n 0.0 0.0 1.0 0.21\n " ]
Please provide a description of the function:
def set_rf_samples(n):
    forest._generate_sample_indices = (lambda rs, n_samples:
        forest.check_random_state(rs).randint(0, n_samples, n))
[ " Changes Scikit learn's random forests to give each tree a random sample of\n n random rows.\n " ]
Please provide a description of the function:
def reset_rf_samples():
    forest._generate_sample_indices = (lambda rs, n_samples:
        forest.check_random_state(rs).randint(0, n_samples, n_samples))
[ " Undoes the changes produced by set_rf_samples.\n " ]
Please provide a description of the function:
def get_global_vars(mod):
    "Return globally assigned variables."
    # https://stackoverflow.com/questions/8820276/docstring-for-variable/31764368#31764368
    import ast,re
    with open(mod.__file__, 'r') as f: fstr = f.read()
    flines = fstr.splitlines()
    d = {}
    for node in ast.walk(ast.parse(fstr)):
        if isinstance(node,ast.Assign) and hasattr(node.targets[0], 'id'):
            key,lineno = node.targets[0].id,node.targets[0].lineno
            codestr = flines[lineno]
            match = re.match(f"^({key})\s*=\s*.*", codestr)
            if match and match.group(1) != '__all__': # only top level assignment
                d[key] = f'`{codestr}` {get_source_link(mod, lineno)}'
    return d
[]
Please provide a description of the function:
def execute_nb(fname, metadata=None, save=True, show_doc_only=False):
    "Execute notebook `fname` with `metadata` for preprocessing."
    # Any module used in the notebook that isn't inside must be in the same directory as this script
    with open(fname) as f: nb = nbformat.read(f, as_version=4)
    ep_class = ExecuteShowDocPreprocessor if show_doc_only else ExecutePreprocessor
    ep = ep_class(timeout=600, kernel_name='python3')
    metadata = metadata or {}
    ep.preprocess(nb, metadata)
    if save:
        with open(fname, 'wt') as f: nbformat.write(nb, f)
        NotebookNotary().sign(nb)
[]
Please provide a description of the function:
def create_module_page(mod, dest_path, force=False):
    "Create the documentation notebook for module `mod` in path `dest_path`."
    nb = get_empty_notebook()
    mod_name = mod.__name__
    strip_name = strip_fastai(mod_name)
    init_cell = [get_md_cell(f'## Title for {strip_name} (use plain english, not module name!)'),
                 get_md_cell('Type an introduction of the package here.')]
    cells = [get_code_cell(f'from fastai.gen_doc.nbdoc import *\nfrom {mod_name} import * ', True)]
    gvar_map = get_global_vars(mod)
    if gvar_map: cells.append(get_md_cell('### Global Variable Definitions:'))
    for name in get_exports(mod):
        if name in gvar_map: cells.append(get_md_cell(gvar_map[name]))
    for ft_name in get_ft_names(mod, include_inner=True):
        if not hasattr(mod, ft_name):
            warnings.warn(f"Module {strip_name} doesn't have a function named {ft_name}.")
            continue
        cells += _symbol_skeleton(ft_name)
        elt = getattr(mod, ft_name)
    nb['cells'] = init_cell + cells + [get_md_cell(UNDOC_HEADER)]
    doc_path = get_doc_path(mod, dest_path)
    write_nb(nb, doc_path, 'w' if force else 'x')
    execute_nb(doc_path)
    return doc_path
[]
Please provide a description of the function:
def get_module_names(path_dir, exclude=None):
    "Search a given `path_dir` and return all the modules contained inside except those in `exclude`."
    if exclude is None: exclude = _default_exclude
    files = sorted(path_dir.glob('*'), key=lambda x: (x.is_dir(), x.name), reverse=True) # directories first
    res = [f'{path_dir.name}']
    for f in files:
        if f.is_dir() and f.name in exclude: continue # exclude directories
        if any([f.name.endswith(ex) for ex in exclude]): continue # exclude extensions
        if f.suffix == '.py': res.append(f'{path_dir.name}.{f.stem}')
        elif f.is_dir(): res += [f'{path_dir.name}.{name}' for name in get_module_names(f)]
    return res
[]
Please provide a description of the function:
def read_nb(fname):
    "Read a notebook in `fname` and return its corresponding json."
    with open(fname,'r') as f: return nbformat.reads(f.read(), as_version=4)
[]
Please provide a description of the function:
def read_nb_content(cells, mod_name):
    "Build a dictionary containing the position of the `cells`."
    doc_fns = {}
    for i, cell in enumerate(cells):
        if cell['cell_type'] == 'code':
            for match in SHOW_DOC_RE.findall(cell['source']):
                doc_fns[match] = i
    return doc_fns
[]
Please provide a description of the function:
def link_markdown_cells(cells, modules):
    "Create documentation links for all cells in markdown with backticks."
    for i, cell in enumerate(cells):
        if cell['cell_type'] == 'markdown':
            cell['source'] = link_docstring(modules, cell['source'])
[]
Please provide a description of the function:
def get_insert_idx(pos_dict, name):
    "Return the position to insert a given function doc in a notebook."
    keys,i = list(pos_dict.keys()),0
    while i < len(keys) and str.lower(keys[i]) < str.lower(name): i+=1
    if i == len(keys): return -1
    else:              return pos_dict[keys[i]]
[]
Please provide a description of the function:
def update_pos(pos_dict, start_key, nbr=2):
    "Update the `pos_dict` by moving all positions after `start_key` by `nbr`."
    for key,idx in pos_dict.items():
        if str.lower(key) >= str.lower(start_key): pos_dict[key] += nbr
    return pos_dict
[]
Please provide a description of the function:
def insert_cells(cells, pos_dict, ft_name, append=False):
    "Insert the function doc `cells` at their correct position and updates `pos_dict`."
    idx = get_insert_idx(pos_dict, ft_name)
    if append or idx == -1: cells += [get_doc_cell(ft_name), get_empty_cell()]
    else:
        cells.insert(idx,   get_doc_cell(ft_name))
        cells.insert(idx+1, get_empty_cell())
        pos_dict = update_pos(pos_dict, ft_name, 2)
    return cells, pos_dict
[]
Please provide a description of the function:
def update_nb_metadata(nb_path=None, title=None, summary=None, keywords='fastai',
                       overwrite=True, **kwargs):
    "Creates jekyll metadata for given notebook path."
    nb = read_nb(nb_path)
    data = {'title': title, 'summary': summary, 'keywords': keywords, **kwargs}
    data = {k:v for (k,v) in data.items() if v is not None} # remove none values
    if not data: return
    nb['metadata']['jekyll'] = data
    write_nb(nb, nb_path)
    NotebookNotary().sign(nb)
[]
Please provide a description of the function:
def get_imported_modules(cells, nb_module_name=''):
    "Finds all submodules of notebook - sorted by submodules > top level modules > manual imports. This gives notebook imports priority."
    module_names = get_top_level_modules()
    nb_imports = [match.group(1) for cell in cells
                  for match in IMPORT_RE.finditer(cell['source']) if cell['cell_type'] == 'code']
    parts = nb_module_name.split('.')
    parent_modules = ['.'.join(parts[:(x+1)]) for x in range_of(parts)] # Imports parent modules - a.b.c = [a, a.b, a.b.c]
    all_modules = module_names + nb_imports + parent_modules
    mods = [import_mod(m, ignore_errors=True) for m in all_modules]
    return [m for m in mods if m is not None]
[]
Please provide a description of the function:
def update_module_page(mod, dest_path='.'):
    "Update the documentation notebook of a given module."
    doc_path = get_doc_path(mod, dest_path)
    strip_name = strip_fastai(mod.__name__)
    nb = read_nb(doc_path)
    cells = nb['cells']
    link_markdown_cells(cells, get_imported_modules(cells, mod.__name__))
    type_dict = read_nb_types(cells)
    gvar_map = get_global_vars(mod)
    for name in get_exports(mod):
        if name not in gvar_map: continue
        code = gvar_map[name]
        if name in type_dict: cells[type_dict[name]] = get_md_cell(code)
        else: cells.append(get_md_cell(code))
    pos_dict = read_nb_content(cells, strip_name)
    ft_names = get_ft_names(mod, include_inner=True)
    new_fts = list(set(ft_names) - set(pos_dict.keys()))
    if new_fts: print(f'Found new functions for {mod}. Please document:\n{new_fts}')
    existing, undoc_cells, new_cells = parse_sections(cells)
    for ft_name in new_fts: new_cells.extend([get_doc_cell(ft_name), get_empty_cell()])
    if len(new_cells) > 1: nb['cells'] = existing + undoc_cells + new_cells
    write_nb(nb, doc_path)
    return doc_path
[]
Please provide a description of the function:
def update_notebooks(source_path, dest_path=None, update_html=True, document_new_fns=False,
                     update_nb_links=True, html_path=None, force=False):
    "`source_path` can be a directory or a file. Assume all modules reside in the fastai directory."
    from .convert2html import convert_nb
    source_path = Path(source_path)

    if source_path.is_file():
        dest_path = source_path.parent if dest_path is None else Path(dest_path)
        html_path = dest_path/'..'/'docs' if html_path is None else Path(html_path)
        doc_path = source_path
        assert source_path.suffix == '.ipynb', 'Must update from notebook or module'
        if document_new_fns:
            mod = import_mod(get_module_from_notebook(source_path))
            if not mod: print('Could not find module for path:', source_path)
            elif mod.__file__.endswith('__init__.py'): pass
            else: update_module_page(mod, dest_path)
        generate_missing_metadata(doc_path)
        if update_nb_links:
            print(f'Updating notebook {doc_path}. Please wait...')
            link_nb(doc_path)
            execute_nb(doc_path, {'metadata': {'path': doc_path.parent}}, show_doc_only=True)
        if update_html:
            check_nbconvert_version()
            html_fn = html_path/doc_path.with_suffix('.html').name
            if not force and html_fn.is_file():
                in_mod  = os.path.getmtime(doc_path)
                out_mod = os.path.getmtime(html_fn)
                if in_mod < out_mod: return
            convert_nb(doc_path, html_path)

    elif (source_path.name.startswith('fastai.')):
        # Do module update
        assert dest_path is not None, 'To update a module, you must specify a destination folder for where notebook resides'
        mod = import_mod(source_path.name)
        if not mod: return print('Could not find module for:', source_path)
        doc_path = Path(dest_path)/(strip_fastai(mod.__name__)+'.ipynb')
        if not doc_path.exists():
            print('Notebook does not exist. Creating:', doc_path)
            create_module_page(mod, dest_path)
        update_notebooks(doc_path, dest_path=dest_path, update_html=update_html,
                         document_new_fns=document_new_fns, update_nb_links=update_nb_links,
                         html_path=html_path)
    elif source_path.is_dir():
        for f in sorted(Path(source_path).glob('*.ipynb')):
            update_notebooks(f, dest_path=dest_path, update_html=update_html,
                             document_new_fns=document_new_fns, update_nb_links=update_nb_links,
                             html_path=html_path)
    else: print('Could not resolve source file:', source_path)
[]
Please provide a description of the function:
def dropout_mask(x:Tensor, sz:Collection[int], p:float):
    "Return a dropout mask of the same type as `x`, size `sz`, with probability `p` to cancel an element."
    return x.new(*sz).bernoulli_(1-p).div_(1-p)
[]
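A short sketch of the inverted-dropout scaling above: kept entries are multiplied by 1/(1-p), so the expected activation is unchanged at train time.

    import torch

    x = torch.ones(2, 4)
    m = dropout_mask(x, (2, 4), p=0.5)   # entries are 0. or 2. (= 1/(1-0.5))
    dropped = x * m                      # apply the mask to the activations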
Please provide a description of the function:
def awd_lstm_lm_split(model:nn.Module) -> List[nn.Module]:
    "Split a RNN `model` in groups for differential learning rates."
    groups = [[rnn, dp] for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)]
    return groups + [[model[0].encoder, model[0].encoder_dp, model[1]]]
[]
Please provide a description of the function:
def value2rgba(x:float, cmap:Callable=cm.RdYlGn, alpha_mult:float=1.0)->Tuple:
    "Convert a value `x` from 0 to 1 (inclusive) to an RGBA tuple according to `cmap` times transparency `alpha_mult`."
    c = cmap(x)
    rgb = (np.array(c[:-1]) * 255).astype(int)
    a = c[-1] * alpha_mult
    return tuple(rgb.tolist() + [a])
[]
Please provide a description of the function:
def _setweights(self):
    "Apply dropout to the raw weights."
    for layer in self.layer_names:
        raw_w = getattr(self, f'{layer}_raw')
        self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_p, training=self.training)
[]
Please provide a description of the function:
def _one_hidden(self, l:int)->Tensor:
    "Return one hidden state."
    nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
    return one_param(self).new(1, self.bs, nh).zero_()
[]
Please provide a description of the function:
def reset(self):
    "Reset the hidden states."
    [r.reset() for r in self.rnns if hasattr(r, 'reset')]
    if self.qrnn: self.hidden = [self._one_hidden(l) for l in range(self.n_layers)]
    else: self.hidden = [(self._one_hidden(l), self._one_hidden(l)) for l in range(self.n_layers)]
[]
Please provide a description of the function:
def intrinsic_attention(self, text:str, class_id:int=None):
    self.model.train()
    _eval_dropouts(self.model)
    self.model.zero_grad()
    self.model.reset()
    ids = self.data.one_item(text)[0]
    emb = self.model[0].module.encoder(ids).detach().requires_grad_(True)
    lstm_output = self.model[0].module(emb, from_embeddings=True)
    self.model.eval()
    cl = self.model[1](lstm_output + (torch.zeros_like(ids).byte(),))[0].softmax(dim=-1)
    if class_id is None: class_id = cl.argmax()
    cl[0][class_id].backward()
    attn = emb.grad.squeeze().abs().sum(dim=-1)
    attn /= attn.max()
    tokens = self.data.single_ds.reconstruct(ids[0])
    return tokens, attn
[ "Calculate the intrinsic attention of the input w.r.t to an output `class_id`, or the classification given by the model if `None`.\n For reference, see the Sequential Jacobian session at https://www.cs.toronto.edu/~graves/preprint.pdf\n " ]
Please provide a description of the function:
def show_top_losses(self, k:int, max_len:int=70)->None:
    from IPython.display import display, HTML
    items = []
    tl_val,tl_idx = self.top_losses()
    for i,idx in enumerate(tl_idx):
        if k <= 0: break
        k -= 1
        tx,cl = self.data.dl(self.ds_type).dataset[idx]
        cl = cl.data
        classes = self.data.classes
        txt = ' '.join(tx.text.split(' ')[:max_len]) if max_len is not None else tx.text
        tmp = [txt, f'{classes[self.pred_class[idx]]}', f'{classes[cl]}',
               f'{self.losses[idx]:.2f}', f'{self.probs[idx][cl]:.2f}']
        items.append(tmp)
    items = np.array(items)
    names = ['Text', 'Prediction', 'Actual', 'Loss', 'Probability']
    df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names)
    with pd.option_context('display.max_colwidth', -1):
        display(HTML(df.to_html(index=False)))
[ "\n Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual,loss, and probability of\n actual class. `max_len` is the maximum number of tokens displayed.\n " ]
Please provide a description of the function:
def on_train_begin(self, epoch:int, **kwargs:Any)->None:
    "Initialize the schedulers for training."
    res = {'epoch':self.start_epoch} if self.start_epoch is not None else None
    self.start_epoch = ifnone(self.start_epoch, epoch)
    self.scheds = [p.scheds for p in self.phases]
    self.opt = self.learn.opt
    for k,v in self.scheds[0].items():
        v.restart()
        self.opt.set_stat(k, v.start)
    self.idx_s = 0
    return res
[]
Please provide a description of the function:
def on_batch_end(self, train, **kwargs:Any)->None:
    "Take a step in lr,mom sched, start next stepper when the current one is complete."
    if train:
        if self.idx_s >= len(self.scheds): return {'stop_training': True, 'stop_epoch': True}
        sched = self.scheds[self.idx_s]
        for k,v in sched.items(): self.opt.set_stat(k, v.step())
        if list(sched.values())[0].is_done: self.idx_s += 1
[]
Please provide a description of the function:
def tensor(x:Any, *rest)->Tensor:
    "Like `torch.as_tensor`, but handle lists too, and can pass multiple vector elements directly."
    if len(rest): x = (x,)+rest
    # XXX: Pytorch bug in dataloader using num_workers>0; TODO: create repro and report
    if is_listy(x) and len(x)==0: return tensor(0)
    res = torch.tensor(x) if is_listy(x) else as_tensor(x)
    if res.dtype is torch.int32:
        warn('Tensor is int32: upgrading to int64; for better performance use int64 input')
        return res.long()
    return res
[]
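Usage sketch, assuming `tensor` above and its fastai helpers (`is_listy`, `as_tensor`) are in scope: it accepts a list, an existing array/tensor, or the elements passed separately.

    t1 = tensor([1, 2, 3])   # from a list
    t2 = tensor(1, 2, 3)     # same result via *rest
    assert (t1 == t2).all()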
Please provide a description of the function:
def to_detach(b:Tensors, cpu:bool=True):
    "Recursively detach lists of tensors in `b`; put them on the CPU if `cpu=True`."
    if is_listy(b): return [to_detach(o, cpu) for o in b]
    if not isinstance(b,Tensor): return b
    b = b.detach()
    return b.cpu() if cpu else b
[]
Please provide a description of the function:
def to_data(b:ItemsList):
    "Recursively map lists of items in `b` to their wrapped data."
    if is_listy(b): return [to_data(o) for o in b]
    return b.data if isinstance(b,ItemBase) else b
[]
Please provide a description of the function:
def to_cpu(b:ItemsList):
    "Recursively map lists of tensors in `b` to the cpu."
    if is_listy(b): return [to_cpu(o) for o in b]
    return b.cpu() if isinstance(b,Tensor) else b
[]
Please provide a description of the function:
def to_half(b:Collection[Tensor])->Collection[Tensor]:
    "Recursively map lists of tensors in `b` to FP16."
    if is_listy(b): return [to_half(o) for o in b]
    return b.half() if b.dtype not in [torch.int64, torch.int32, torch.int16] else b
[]
Please provide a description of the function:
def to_float(b:Collection[Tensor])->Collection[Tensor]:
    "Recursively map lists of tensors in `b` to FP32."
    if is_listy(b): return [to_float(o) for o in b]
    return b.float() if b.dtype not in [torch.int64, torch.int32, torch.int16] else b
[]
Please provide a description of the function:
def to_device(b:Tensors, device:torch.device):
    "Recursively put `b` on `device`."
    device = ifnone(device, defaults.device)
    if is_listy(b): return [to_device(o, device) for o in b]
    if is_dict(b): return {k: to_device(v, device) for k, v in b.items()}
    return b.to(device, non_blocking=True)
[]
Please provide a description of the function:
def data_collate(batch:ItemsList)->Tensor:
    "Convert `batch` items to tensor data."
    return torch.utils.data.dataloader.default_collate(to_data(batch))
[]
Please provide a description of the function:
def requires_grad(m:nn.Module, b:Optional[bool]=None)->Optional[bool]:
    "If `b` is not set return `requires_grad` of first param, else set `requires_grad` on all params as `b`."
    ps = list(m.parameters())
    if not ps: return None
    if b is None: return ps[0].requires_grad
    for p in ps: p.requires_grad=b
[]
Please provide a description of the function:
def trainable_params(m:nn.Module)->ParamList:
    "Return list of trainable params in `m`."
    res = filter(lambda p: p.requires_grad, m.parameters())
    return res
[]
Please provide a description of the function:
def children_and_parameters(m:nn.Module):
    "Return the children of `m` and its direct parameters not registered in modules."
    children = list(m.children())
    children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
    for p in m.parameters():
        if id(p) not in children_p: children.append(ParameterModule(p))
    return children
[]
Please provide a description of the function:
def split_model_idx(model:nn.Module, idxs:Collection[int])->ModuleList:
    "Split `model` according to the indexes in `idxs`."
    layers = flatten_model(model)
    if idxs[0] != 0: idxs = [0] + idxs
    if idxs[-1] != len(layers): idxs.append(len(layers))
    return [nn.Sequential(*layers[i:j]) for i,j in zip(idxs[:-1],idxs[1:])]
[]
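A minimal sketch, assuming fastai's `flatten_model` is in scope: splitting a four-layer model at index 2 yields two layer groups.

    import torch.nn as nn

    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2), nn.ReLU())
    groups = split_model_idx(model, [2])   # layers [0:2] and [2:4]
    assert len(groups) == 2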
Please provide a description of the function:
def split_model(model:nn.Module=None, splits:Collection[Union[nn.Module,ModuleList]]=None):
    "Split `model` according to the layers in `splits`."
    splits = listify(splits)
    if isinstance(splits[0], nn.Module):
        layers = flatten_model(model)
        idxs = [layers.index(first_layer(s)) for s in splits]
        return split_model_idx(model, idxs)
    return [nn.Sequential(*s) for s in splits]
[]
Please provide a description of the function:
def split_no_wd_params(layer_groups:Collection[nn.Module])->List[List[nn.Parameter]]:
    "Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest."
    split_params = []
    for l in layer_groups:
        l1,l2 = [],[]
        for c in l.children():
            if isinstance(c, no_wd_types): l2 += list(trainable_params(c))
            elif isinstance(c, bias_types):
                bias = c.bias if hasattr(c, 'bias') else None
                l1 += [p for p in trainable_params(c) if not (p is bias)]
                if bias is not None: l2.append(bias)
            else: l1 += list(trainable_params(c))
        # Since we scan the children separately, we might get duplicates (tied weights).
        # We need to preserve the order for the optimizer load of state_dict
        l1,l2 = uniqueify(l1),uniqueify(l2)
        split_params += [l1, l2]
    return split_params
[]
Please provide a description of the function:
def set_bn_eval(m:nn.Module)->None:
    "Set bn layers in eval mode for all recursive children of `m`."
    for l in m.children():
        if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
            l.eval()
        set_bn_eval(l)
[]
Please provide a description of the function:
def bn2float(module:nn.Module)->nn.Module:
    "If `module` is batchnorm don't use half precision."
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): module.float()
    for child in module.children(): bn2float(child)
    return module
[]
Please provide a description of the function:
def init_default(m:nn.Module, func:LayerFunc=nn.init.kaiming_normal_)->nn.Module:
    "Initialize `m` weights with `func` and set `bias` to 0."
    if func:
        if hasattr(m, 'weight'): func(m.weight)
        if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
    return m
[]
Please provide a description of the function:
def cond_init(m:nn.Module, init_func:LayerFunc):
    "Initialize the non-batchnorm layers of `m` with `init_func`."
    if (not isinstance(m, bn_types)) and requires_grad(m): init_default(m, init_func)
[]
Please provide a description of the function:
def apply_init(m, init_func:LayerFunc):
    "Initialize all non-batchnorm layers of `m` with `init_func`."
    apply_leaf(m, partial(cond_init, init_func=init_func))
[]
Please provide a description of the function:
def in_channels(m:nn.Module) -> List[int]:
    "Return the shape of the first weight layer in `m`."
    for l in flatten_model(m):
        if hasattr(l, 'weight'): return l.weight.shape[1]
    raise Exception('No weight layer')
[]
Please provide a description of the function:
def model_type(dtype):
    "Return the torch type corresponding to `dtype`."
    return (torch.float32 if np.issubdtype(dtype, np.floating) else
            torch.int64   if np.issubdtype(dtype, np.integer) else None)
[]
Please provide a description of the function:
def np2model_tensor(a):
    "Transform numpy array `a` to a tensor of the same type."
    dtype = model_type(a.dtype)
    res = as_tensor(a)
    if not dtype: return res
    return res.type(dtype)
[]
Please provide a description of the function:
def _pca(x, k=2):
    "Compute PCA of `x` with `k` dimensions."
    x = x-torch.mean(x,0)
    U,S,V = torch.svd(x.t())
    return torch.mm(x,U[:,:k])
[]
Please provide a description of the function:
def grab_idx(x,i,batch_first:bool=True):
    "Grab the `i`-th batch in `x`, `batch_first` stating the batch dimension."
    if batch_first: return ([o[i].cpu() for o in x]   if is_listy(x) else x[i].cpu())
    else:           return ([o[:,i].cpu() for o in x] if is_listy(x) else x[:,i].cpu())
[]
Please provide a description of the function:
def logit_(x:Tensor)->Tensor:
    "Inplace logit of `x`, clamped to avoid inf."
    x.clamp_(1e-7, 1-1e-7)
    return (x.reciprocal_().sub_(1)).log_().neg_()
[]
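A quick check of the in-place logit: it inverts a sigmoid (up to the clamping at the boundaries).

    import torch

    x = torch.tensor([0.1, 0.5, 0.9])
    y = x.clone()
    logit_(y)                                        # y now holds logit(x)
    assert torch.allclose(torch.sigmoid(y), x, atol=1e-5)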
Please provide a description of the function:
def uniform(low:Number, high:Number=None, size:Optional[List[int]]=None)->FloatOrTensor:
    "Draw 1 or shape=`size` random floats from uniform dist: min=`low`, max=`high`."
    if high is None: high=low
    return random.uniform(low,high) if size is None else torch.FloatTensor(*listify(size)).uniform_(low,high)
[]
Please provide a description of the function:
def log_uniform(low, high, size:Optional[List[int]]=None)->FloatOrTensor:
    "Draw 1 or shape=`size` random floats from uniform dist: min=log(`low`), max=log(`high`)."
    res = uniform(log(low), log(high), size)
    return exp(res) if size is None else res.exp_()
[]
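Sketch, assuming `log_uniform` above and its dependencies (`uniform`, `log`, `exp`) are in scope: log-uniform sampling is the usual choice for scale-like hyperparameters such as learning rates, since each decade between `low` and `high` is equally likely.

    lr  = log_uniform(1e-5, 1e-1)             # a single float
    lrs = log_uniform(1e-5, 1e-1, size=[4])   # a tensor of 4 candidates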
Please provide a description of the function:
def rand_bool(p:float, size:Optional[List[int]]=None)->BoolOrTensor:
    "Draw 1 or shape=`size` random booleans (`True` occurring with probability `p`)."
    return uniform(0,1,size)<p
[]
Please provide a description of the function:
def uniform_int(low:int, high:int, size:Optional[List[int]]=None)->IntOrTensor:
    "Generate int or tensor `size` of ints between `low` and `high` (included)."
    return random.randint(low,high) if size is None else torch.randint(low,high+1,size)
[]
Please provide a description of the function:
def try_int(o:Any)->Any:
    "Try to convert `o` to int, default to `o` if not possible."
    # NB: single-item rank-1 array/tensor can be converted to int, but we don't want to do this
    if isinstance(o, (np.ndarray,Tensor)): return o if o.ndim else int(o)
    if isinstance(o, collections.Sized) or getattr(o,'__array_interface__',False): return o
    try: return int(o)
    except: return o
[]
Please provide a description of the function:
def get_model(model:nn.Module):
    "Return the model maybe wrapped inside `model`."
    return model.module if isinstance(model, (DistributedDataParallel, nn.DataParallel)) else model
[]
Please provide a description of the function:
def flatten_check(out:Tensor, targ:Tensor) -> Tensor:
    "Check that `out` and `targ` have the same number of elements and flatten them."
    out,targ = out.contiguous().view(-1),targ.contiguous().view(-1)
    assert len(out) == len(targ), f"Expected output and target to have the same number of elements but got {len(out)} and {len(targ)}."
    return out,targ
[]
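Usage sketch: metric functions call this to align an `(n,1)` output with an `(n,)` target before comparing element-wise.

    import torch

    out, targ = torch.randn(8, 1), torch.randn(8)
    o, t = flatten_check(out, targ)   # both now have shape (8,)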
Please provide a description of the function:
def remove_module_load(state_dict):
    new_state_dict = OrderedDict()
    for k, v in state_dict.items(): new_state_dict[k[7:]] = v
    return new_state_dict
[ "create new OrderedDict that does not contain `module.`" ]
Please provide a description of the function:
def add_metrics(last_metrics:Collection[Rank0Tensor], mets:Union[Rank0Tensor, Collection[Rank0Tensor]]):
    "Return a dictionary for updating `last_metrics` with `mets`."
    last_metrics,mets = listify(last_metrics),listify(mets)
    return {'last_metrics': last_metrics + mets}
[]
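A hedged sketch of the intended call site, following the fastai v1 custom-metric pattern (the `SumTargets` class is made up for illustration): a `Callback` returns this dict from `on_epoch_end` so the value is appended to that epoch's metrics row.

    class SumTargets(Callback):
        def on_epoch_begin(self, **kwargs): self.total = 0.
        def on_batch_end(self, last_target, **kwargs): self.total += len(last_target)
        def on_epoch_end(self, last_metrics, **kwargs):
            return add_metrics(last_metrics, self.total)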
Please provide a description of the function:
def map(self, fn, *iterables, timeout=None, chunksize=1, prefetch=None):
    if timeout is not None: end_time = timeout + time.time()
    if prefetch is None: prefetch = self._max_workers
    if prefetch < 0: raise ValueError("prefetch count may not be negative")
    argsiter = zip(*iterables)
    fs = collections.deque(self.submit(fn, *args)
                           for args in itertools.islice(argsiter, self._max_workers+prefetch))
    # Yield must be hidden in closure so that the futures are submitted before the first iterator value is required.
    def result_iterator():
        nonlocal argsiter
        try:
            while fs:
                res = fs[0].result() if timeout is None else fs[0].result(end_time-time.time())
                # Got a result, future needn't be cancelled
                del fs[0]
                # Dispatch next task before yielding to keep pipeline full
                if argsiter:
                    try: args = next(argsiter)
                    except StopIteration: argsiter = None
                    else: fs.append(self.submit(fn, *args))
                yield res
        finally:
            for future in fs: future.cancel()
    return result_iterator()
[ "\n Collects iterables lazily, rather than immediately.\n Docstring same as parent: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Executor\n Implmentation taken from this PR: https://github.com/python/cpython/pull/707\n " ]
Please provide a description of the function:
def gen_ascii_docs(src='fastai'):
    os.chdir(Path(__file__).absolute().parent)
    with working_directory('..'):
        path = Path(src)
        if path.is_dir(): file_paths = list(path.glob('**/*.py'))
        else: file_paths = [path]
    pat = re.compile('^(?!__init__).*.py\Z')
    for file_path in file_paths:
        if pat.match(file_path.name):
            file_path.parent.mkdir(parents=True, exist_ok=True)
            with working_directory('..'):
                tmpl_str = parse_module(file_path)
            (file_path.parent/(file_path.name.rsplit('.',1)[0] + '.adoc.tmpl')).write_text(tmpl_str)
            (file_path.parent/(file_path.name.rsplit('.',1)[0] + '.adoc')).write_text(
                re.sub(r"{{(.*?)}}", parse_tmpl, tmpl_str, flags=re.DOTALL))
    if path.is_dir(): subprocess.call(['asciidoctor', str(path) + '/**/*.adoc'])
    else: subprocess.call(['asciidoctor', str(path).rsplit('.',1)[0] + '.adoc'])
[ "Generate documentation for fastai library in HTML (asciidoctor required)\n :param str src: The absolute/relative path of source file/dir\n " ]
Please provide a description of the function:
def _get_new_batch(self, ds_type:DatasetType)->Collection[Tensor]:
    "Retrieves new batch of DatasetType, and detaches it."
    return self.learn.data.one_batch(ds_type=ds_type, detach=True, denorm=False, cpu=False)
[]
Please provide a description of the function:
def _update_batches_if_needed(self)->None:
    "one_batch function is extremely slow with large datasets. This is caching the result as an optimization."
    if self.learn.data.valid_dl is None: return # Running learning rate finder, so return
    update_batches = self.data is not self.learn.data
    if not update_batches: return
    self.data = self.learn.data
    self.trn_batch = self._get_new_batch(ds_type=DatasetType.Train)
    self.val_batch = self._get_new_batch(ds_type=DatasetType.Valid)
[]
Please provide a description of the function:
def _write_model_stats(self, iteration:int)->None:
    "Writes gradient statistics to Tensorboard."
    self.stats_writer.write(model=self.learn.model, iteration=iteration, tbwriter=self.tbwriter)
[]
Please provide a description of the function:
def _write_training_loss(self, iteration:int, last_loss:Tensor)->None:
    "Writes training loss to Tensorboard."
    scalar_value = to_np(last_loss)
    tag = self.metrics_root + 'train_loss'
    self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
[]
Please provide a description of the function:
def _write_weight_histograms(self, iteration:int)->None:
    "Writes model weight histograms to Tensorboard."
    self.hist_writer.write(model=self.learn.model, iteration=iteration, tbwriter=self.tbwriter)
[]
Please provide a description of the function:
def _write_scalar(self, name:str, scalar_value, iteration:int)->None:
    "Writes single scalar value to Tensorboard."
    tag = self.metrics_root + name
    self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
[]