code | package | path | filename
---|---|---|---|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/71_callback.tensorboard.ipynb (unless otherwise specified).
__all__ = ['TensorBoardBaseCallback', 'TensorBoardCallback', 'TensorBoardProjectorCallback',
'projector_word_embeddings']
# Cell
from ..basics import *
# Cell
import tensorboard
from torch.utils.tensorboard import SummaryWriter
from .fp16 import ModelToHalf
from .hook import hook_output
# Cell
class TensorBoardBaseCallback(Callback):
"Base class for tensorboard callbacks"
def __init__(self):
self.run_projector = False
def after_pred(self):
if self.run_projector: self.feat = _add_projector_features(self.learn, self.h, self.feat)
def after_validate(self):
if not self.run_projector: return
self.run_projector = False
self._remove()
_write_projector_embedding(self.learn, self.writer, self.feat)
def after_fit(self):
if self.run: self.writer.close()
def _setup_projector(self):
self.run_projector = True
self.h = hook_output(self.learn.model[1][1] if not self.layer else self.layer)
self.feat = {}
def _setup_writer(self):
self.writer = SummaryWriter(log_dir=self.log_dir)
def _remove(self):
if getattr(self, 'h', None): self.h.remove()
def __del__(self): self._remove()
# Cell
class TensorBoardCallback(TensorBoardBaseCallback):
"Saves model topology, losses & metrics for tensorboard and tensorboard projector during training"
def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9, projector=False, layer=None):
super().__init__()
store_attr()
def before_fit(self):
self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0
if not self.run: return
self._setup_writer()
if self.trace_model:
if hasattr(self.learn, 'mixed_precision'):
raise Exception("Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.")
b = self.dls.one_batch()
self.learn._split(b)
self.writer.add_graph(self.model, *self.xb)
def after_batch(self):
self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter)
for i,h in enumerate(self.opt.hypers):
for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter)
def after_epoch(self):
for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]):
self.writer.add_scalar(n, v, self.train_iter)
if self.log_preds:
b = self.dls.valid.one_batch()
self.learn.one_batch(0, b)
preds = getattr(self.loss_func, 'activation', noop)(self.pred)
out = getattr(self.loss_func, 'decodes', noop)(preds)
x,y,its,outs = self.dls.valid.show_results(b, out, show=False, max_n=self.n_preds)
tensorboard_log(x, y, its, outs, self.writer, self.train_iter)
def before_validate(self):
if self.projector: self._setup_projector()
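# Usage sketch (illustrative, not part of the original module): assuming a vision
# `Learner` named `learn` already exists (e.g. built with `cnn_learner`), the callback
# can be passed to a fit method; 'runs/exp1' is an arbitrary example log directory:
#
#   from fastai.callback.tensorboard import TensorBoardCallback
#   learn.fit_one_cycle(3, cbs=TensorBoardCallback(log_dir='runs/exp1', trace_model=True))
#   # then inspect the logs with: tensorboard --logdir runs/exp1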
# Cell
class TensorBoardProjectorCallback(TensorBoardBaseCallback):
"Extracts and exports image featuers for tensorboard projector during inference"
def __init__(self, log_dir=None, layer=None):
super().__init__()
store_attr()
def before_fit(self):
self.run = not hasattr(self.learn, 'lr_finder') and hasattr(self, "gather_preds") and rank_distrib()==0
if not self.run: return
self._setup_writer()
def before_validate(self):
self._setup_projector()
# Cell
def _write_projector_embedding(learn, writer, feat):
lbls = [learn.dl.vocab[l] for l in feat['lbl']] if getattr(learn.dl, 'vocab', None) else None
vecs = feat['vec'].squeeze()
writer.add_embedding(vecs, metadata=lbls, label_img=feat['img'], global_step=learn.train_iter)
# Cell
def _add_projector_features(learn, hook, feat):
img = _normalize_for_projector(learn.x)
first_epoch = True if learn.iter == 0 else False
feat['vec'] = hook.stored if first_epoch else torch.cat((feat['vec'], hook.stored),0)
feat['img'] = img if first_epoch else torch.cat((feat['img'], img),0)
if getattr(learn.dl, 'vocab', None):
feat['lbl'] = learn.y if first_epoch else torch.cat((feat['lbl'], learn.y),0)
return feat
# Cell
def _get_embeddings(model, layer):
layer = model[0].encoder if layer == None else layer
return layer.weight
# Cell
@typedispatch
def _normalize_for_projector(x:TensorImage):
# normalize tensor to be between 0-1
img = x.clone()
sz = img.shape
img = img.view(x.size(0), -1)
img -= img.min(1, keepdim=True)[0]
img /= img.max(1, keepdim=True)[0]
img = img.view(*sz)
return img
# Cell
from ..text.all import LMLearner, TextLearner
# Cell
def projector_word_embeddings(learn=None, layer=None, vocab=None, limit=-1, start=0, log_dir=None):
"Extracts and exports word embeddings from language models embedding layers"
if not layer:
if isinstance(learn, LMLearner): layer = learn.model[0].encoder
elif isinstance(learn, TextLearner): layer = learn.model[0].module.encoder
emb = layer.weight
img = torch.full((len(emb),3,8,8), 0.7)
vocab = learn.dls.vocab[0] if vocab == None else vocab
vocab = list(map(lambda x: f'{x}_', vocab))
writer = SummaryWriter(log_dir=log_dir)
end = start + limit if limit >= 0 else -1
writer.add_embedding(emb[start:end], metadata=vocab[start:end], label_img=img[start:end])
writer.close()
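# Usage sketch (illustrative): assuming `learn` is a trained `LMLearner` or `TextLearner`,
# the first `limit` embeddings can be exported for the TensorBoard projector;
# 'runs/embeddings' is an arbitrary example log directory:
#
#   projector_word_embeddings(learn, limit=2000, start=0, log_dir='runs/embeddings')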
# Cell
from ..vision.data import *
# Cell
@typedispatch
def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step):
fig,axs = get_grid(len(samples), add_vert=1, return_fig=True)
for i in range(2):
axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)]
axs = [r.show(ctx=c, color='green' if b==r else 'red')
for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)]
writer.add_figure('Sample results', fig, step)
# Cell
from ..vision.core import TensorPoint,TensorBBox
# Cell
@typedispatch
def tensorboard_log(x:TensorImage, y: (TensorImageBase, TensorPoint, TensorBBox), samples, outs, writer, step):
fig,axs = get_grid(len(samples), add_vert=1, return_fig=True, double=True)
for i in range(2):
axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])]
for x in [samples,outs]:
axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])]
writer.add_figure('Sample results', fig, step)
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/tensorboard.py | tensorboard.py |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/14_callback.schedule.ipynb (unless otherwise specified).
__all__ = ['annealer', 'sched_lin', 'sched_cos', 'sched_no', 'sched_exp', 'SchedLin', 'SchedCos', 'SchedNo', 'SchedExp',
'SchedPoly', 'combine_scheds', 'combined_cos', 'ParamScheduler', 'LRFinder', 'SuggestedLRs']
# Cell
from ..basics import *
# Cell
class _Annealer:
def __init__(self, f, start, end): store_attr('f,start,end')
def __call__(self, pos): return self.f(self.start, self.end, pos)
# Cell
def annealer(f):
"Decorator to make `f` return itself partially applied."
@functools.wraps(f)
def _inner(start, end): return _Annealer(f, start, end)
return _inner
# Cell
#TODO Jeremy, make this pickle
#@annealer
#def SchedLin(start, end, pos): return start + pos*(end-start)
#@annealer
#def SchedCos(start, end, pos): return start + (1 + math.cos(math.pi*(1-pos))) * (end-start) / 2
#@annealer
#def SchedNo (start, end, pos): return start
#@annealer
#def SchedExp(start, end, pos): return start * (end/start) ** pos
#
#SchedLin.__doc__ = "Linear schedule function from `start` to `end`"
#SchedCos.__doc__ = "Cosine schedule function from `start` to `end`"
#SchedNo .__doc__ = "Constant schedule function with `start` value"
#SchedExp.__doc__ = "Exponential schedule function from `start` to `end`"
# Cell
def sched_lin(start, end, pos): return start + pos*(end-start)
def sched_cos(start, end, pos): return start + (1 + math.cos(math.pi*(1-pos))) * (end-start) / 2
def sched_no (start, end, pos): return start
def sched_exp(start, end, pos): return start * (end/start) ** pos
def SchedLin(start, end): return _Annealer(sched_lin, start, end)
def SchedCos(start, end): return _Annealer(sched_cos, start, end)
def SchedNo (start, end): return _Annealer(sched_no, start, end)
def SchedExp(start, end): return _Annealer(sched_exp, start, end)
SchedLin.__doc__ = "Linear schedule function from `start` to `end`"
SchedCos.__doc__ = "Cosine schedule function from `start` to `end`"
SchedNo .__doc__ = "Constant schedule function with `start` value"
SchedExp.__doc__ = "Exponential schedule function from `start` to `end`"
# Cell
def SchedPoly(start, end, power):
"Polynomial schedule (of `power`) function from `start` to `end`"
def _inner(pos): return start + (end - start) * pos ** power
return _inner
# Cell
def combine_scheds(pcts, scheds):
"Combine `scheds` according to `pcts` in one function"
assert sum(pcts) == 1.
pcts = tensor([0] + L(pcts))
assert torch.all(pcts >= 0)
pcts = torch.cumsum(pcts, 0)
def _inner(pos):
if int(pos) == 1: return scheds[-1](1.)
idx = (pos >= pcts).nonzero().max()
actual_pos = (pos-pcts[idx]) / (pcts[idx+1]-pcts[idx])
return scheds[idx](actual_pos.item())
return _inner
# Cell
def combined_cos(pct, start, middle, end):
"Return a scheduler with cosine annealing from `start`→`middle` & `middle`→`end`"
return combine_scheds([pct,1-pct], [SchedCos(start, middle), SchedCos(middle, end)])
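# Worked example (illustrative): a schedule that anneals 1e-4 -> 1e-2 over the first 25%
# of training, then 1e-2 -> 1e-5 over the rest, evaluated at a few positions:
#
#   sched = combined_cos(0.25, 1e-4, 1e-2, 1e-5)
#   sched(0.0)    # -> 1e-4 (start of the warm-up phase)
#   sched(0.25)   # -> 1e-2 (boundary between the two cosine phases)
#   sched(1.0)    # -> 1e-5 (end of training)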
# Cell
@docs
class ParamScheduler(Callback):
"Schedule hyper-parameters according to `scheds`"
order,run_valid = 60,False
def __init__(self, scheds): self.scheds = scheds
def before_fit(self): self.hps = {p:[] for p in self.scheds.keys()}
def before_batch(self): self._update_val(self.pct_train)
def _update_val(self, pct):
for n,f in self.scheds.items(): self.opt.set_hyper(n, f(pct))
def after_batch(self):
for p in self.scheds.keys(): self.hps[p].append(self.opt.hypers[-1][p])
def after_fit(self):
if hasattr(self.learn, 'recorder') and hasattr(self, 'hps'): self.recorder.hps = self.hps
_docs = {"before_fit": "Initialize container for hyper-parameters",
"before_batch": "Set the proper hyper-parameters in the optimizer",
"after_batch": "Record hyper-parameters of this batch",
"after_fit": "Save the hyper-parameters in the recorder if there is one"}
# Cell
@patch
def fit_one_cycle(self:Learner, n_epoch, lr_max=None, div=25., div_final=1e5, pct_start=0.25, wd=None,
moms=None, cbs=None, reset_opt=False):
"Fit `self.model` for `n_epoch` using the 1cycle policy."
if self.opt is None: self.create_opt()
self.opt.set_hyper('lr', self.lr if lr_max is None else lr_max)
lr_max = np.array([h['lr'] for h in self.opt.hypers])
scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
# Cell
@patch
def plot_sched(self:Recorder, keys=None, figsize=None):
keys = self.hps.keys() if keys is None else L(keys)
rows,cols = (len(keys)+1)//2, min(2, len(keys))
figsize = figsize or (6*cols,4*rows)
_, axs = plt.subplots(rows, cols, figsize=figsize)
axs = axs.flatten() if len(keys) > 1 else L(axs)
for p,ax in zip(keys, axs):
ax.plot(self.hps[p])
ax.set_ylabel(p)
# Cell
@patch
def fit_flat_cos(self:Learner, n_epoch, lr=None, div_final=1e5, pct_start=0.75, wd=None,
cbs=None, reset_opt=False):
"Fit `self.model` for `n_epoch` at flat `lr` before a cosine annealing."
if self.opt is None: self.create_opt()
self.opt.set_hyper('lr', self.lr if lr is None else lr)
lr = np.array([h['lr'] for h in self.opt.hypers])
scheds = {'lr': combined_cos(pct_start, lr, lr, lr/div_final)}
self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
# Cell
@patch
def fit_sgdr(self:Learner, n_cycles, cycle_len, lr_max=None, cycle_mult=2, cbs=None, reset_opt=False, wd=None):
"Fit `self.model` for `n_cycles` of `cycle_len` using SGDR."
if self.opt is None: self.create_opt()
self.opt.set_hyper('lr', self.lr if lr_max is None else lr_max)
lr_max = np.array([h['lr'] for h in self.opt.hypers])
n_epoch = cycle_len * (cycle_mult**n_cycles-1)//(cycle_mult-1)
pcts = [cycle_len * cycle_mult**i / n_epoch for i in range(n_cycles)]
scheds = [SchedCos(lr_max, 0) for _ in range(n_cycles)]
scheds = {'lr': combine_scheds(pcts, scheds)}
self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
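# Worked example (illustrative): with the default `cycle_mult=2`, `fit_sgdr(3, 1)` runs
# cosine cycles of 1, 2 and 4 epochs, i.e. n_epoch = 1*(2**3 - 1)//(2 - 1) = 7 epochs
# with warm restarts:
#
#   learn.fit_sgdr(3, 1)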
# Cell
@patch
@delegates(Learner.fit_one_cycle)
def fine_tune(self:Learner, epochs, base_lr=2e-3, freeze_epochs=1, lr_mult=100,
pct_start=0.3, div=5.0, **kwargs):
"Fine tune with `freeze` for `freeze_epochs` then with `unfreeze` from `epochs` using discriminative LR"
self.freeze()
self.fit_one_cycle(freeze_epochs, slice(base_lr), pct_start=0.99, **kwargs)
base_lr /= 2
self.unfreeze()
self.fit_one_cycle(epochs, slice(base_lr/lr_mult, base_lr), pct_start=pct_start, div=div, **kwargs)
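# Usage sketch (illustrative): one frozen epoch at `base_lr` (with a very late `pct_start`),
# then 5 unfrozen epochs with discriminative learning rates from `base_lr/(2*lr_mult)`
# up to `base_lr/2`:
#
#   learn.fine_tune(5, base_lr=2e-3, freeze_epochs=1)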
# Cell
@docs
class LRFinder(ParamScheduler):
"Training with exponentially growing learning rate"
def __init__(self, start_lr=1e-7, end_lr=10, num_it=100, stop_div=True):
if is_listy(start_lr):
self.scheds = {'lr': [SchedExp(s, e) for (s,e) in zip(start_lr,end_lr)]}
else: self.scheds = {'lr': SchedExp(start_lr, end_lr)}
self.num_it,self.stop_div = num_it,stop_div
def before_fit(self):
super().before_fit()
self.learn.save('_tmp')
self.best_loss = float('inf')
def before_batch(self):
self._update_val(self.train_iter/self.num_it)
def after_batch(self):
super().after_batch()
if self.smooth_loss < self.best_loss: self.best_loss = self.smooth_loss
if self.smooth_loss > 4*self.best_loss and self.stop_div: raise CancelFitException()
if self.train_iter >= self.num_it: raise CancelFitException()
def before_validate(self): raise CancelValidException()
def after_fit(self):
self.learn.opt.zero_grad() #Need to zero the gradients of the model before detaching the optimizer for future fits
tmp_f = self.path/self.model_dir/'_tmp.pth'
if tmp_f.exists():
self.learn.load('_tmp', with_opt=True)
os.remove(tmp_f)
_docs = {"before_fit": "Initialize container for hyper-parameters and save the model",
"before_batch": "Set the proper hyper-parameters in the optimizer",
"after_batch": "Record hyper-parameters of this batch and potentially stop training",
"after_fit": "Save the hyper-parameters in the recorder if there is one and load the original model",
"before_validate": "Skip the validation part of training"}
# Cell
@patch
def plot_lr_find(self:Recorder, skip_end=5):
"Plot the result of an LR Finder test (won't work if you didn't do `learn.lr_find()` before)"
lrs = self.lrs if skip_end==0 else self.lrs [:-skip_end]
losses = self.losses if skip_end==0 else self.losses[:-skip_end]
fig, ax = plt.subplots(1,1)
ax.plot(lrs, losses)
ax.set_ylabel("Loss")
ax.set_xlabel("Learning Rate")
ax.set_xscale('log')
# Cell
SuggestedLRs = collections.namedtuple('SuggestedLRs', ['lr_min', 'lr_steep'])
# Cell
@patch
def lr_find(self:Learner, start_lr=1e-7, end_lr=10, num_it=100, stop_div=True, show_plot=True, suggestions=True):
"Launch a mock training to find a good learning rate, return lr_min, lr_steep if `suggestions` is True"
n_epoch = num_it//len(self.dls.train) + 1
cb=LRFinder(start_lr=start_lr, end_lr=end_lr, num_it=num_it, stop_div=stop_div)
with self.no_logging(): self.fit(n_epoch, cbs=cb)
if show_plot: self.recorder.plot_lr_find()
if suggestions:
lrs,losses = tensor(self.recorder.lrs[num_it//10:-5]),tensor(self.recorder.losses[num_it//10:-5])
if len(losses) == 0: return
lr_min = lrs[losses.argmin()].item()
grads = (losses[1:]-losses[:-1]) / (lrs[1:].log()-lrs[:-1].log())
lr_steep = lrs[grads.argmin()].item()
return SuggestedLRs(lr_min/10.,lr_steep)
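# Usage sketch (illustrative): run the mock training, read the suggestions, then train
# with a learning rate near the steepest point of the loss curve:
#
#   suggestions = learn.lr_find()                       # SuggestedLRs(lr_min, lr_steep)
#   learn.fit_one_cycle(3, lr_max=suggestions.lr_steep)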
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/schedule.py | schedule.py |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/14a_callback.data.ipynb (unless otherwise specified).
__all__ = ['CollectDataCallback', 'CudaCallback', 'WeightedDL', 'PartialDL']
# Cell
from ..basics import *
# Cell
class CollectDataCallback(Callback):
"Collect all batches, along with `pred` and `loss`, into `self.data`. Mainly for testing"
def before_fit(self): self.data = L()
def after_batch(self):
self.data.append(self.learn.to_detach((self.xb,self.yb,self.pred,self.loss)))
# Cell
class CudaCallback(Callback):
"Move data to CUDA device"
def __init__(self, device=None): self.device = ifnone(device, default_device())
def before_batch(self): self.learn.xb,self.learn.yb = to_device(self.xb),to_device(self.yb)
def before_fit(self): self.model.to(self.device)
# Cell
@delegates()
class WeightedDL(TfmdDL):
def __init__(self, dataset=None, bs=None, wgts=None, **kwargs):
super().__init__(dataset=dataset, bs=bs, **kwargs)
wgts = array([1.]*len(dataset) if wgts is None else wgts)
self.wgts = wgts/wgts.sum()
def get_idxs(self):
if self.n==0: return []
if not self.shuffle: return super().get_idxs()
return list(np.random.choice(self.n, self.n, p=self.wgts))
# Cell
@patch
@delegates(Datasets.dataloaders)
def weighted_dataloaders(self:Datasets, wgts, bs=64, **kwargs):
xtra_kwargs = [{}] * (self.n_subsets-1)
return self.dataloaders(bs=bs, dl_type=WeightedDL, dl_kwargs=({'wgts':wgts}, *xtra_kwargs), **kwargs)
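# Usage sketch (illustrative): oversample rare items by giving them a larger weight;
# `dsets` is assumed to be an existing `Datasets`, and `train_items`/`is_rare` are
# placeholders for your own training items and rarity test:
#
#   wgts = [3. if is_rare(o) else 1. for o in train_items]
#   dls = dsets.weighted_dataloaders(wgts, bs=64)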
# Cell
@delegates()
class PartialDL(TfmdDL):
"Select randomly partial quantity of data at each epoch"
def __init__(self, dataset=None, bs=None, partial_n=None, **kwargs):
super().__init__(dataset=dataset, bs=bs, **kwargs)
self.partial_n = min(partial_n, self.n) if partial_n else None
def get_idxs(self):
if self.partial_n is None: return super().get_idxs()
return list(np.random.choice(self.n, self.partial_n, replace=False))
def __len__(self):
if self.partial_n is None: return super().__len__()
return self.partial_n//self.bs + (0 if self.drop_last or self.partial_n%self.bs==0 else 1)
# Cell
@patch
@delegates(Datasets.dataloaders)
def partial_dataloaders(self:FilteredBase, partial_n, bs=64, **kwargs):
"Create a partial dataloader `PartialDL` for the training set"
xtra_kwargs = [{}] * (self.n_subsets-1)
return self.dataloaders(bs=bs, dl_type=PartialDL, dl_kwargs=({'partial_n':partial_n}, *xtra_kwargs), **kwargs)
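# Usage sketch (illustrative): train on ~1000 randomly chosen items per epoch while keeping
# the full validation set; `dsets` is assumed to be an existing `Datasets`:
#
#   dls = dsets.partial_dataloaders(partial_n=1000, bs=64)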
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/data.py | data.py |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/18a_callback.training.ipynb (unless otherwise specified).
__all__ = ['ShortEpochCallback', 'GradientAccumulation', 'GradientClip', 'set_bn_eval', 'BnFreeze', 'bn_types']
# Cell
from ..basics import *
from .progress import *
from .fp16 import *
# Cell
class ShortEpochCallback(Callback):
"Fit just `pct` of an epoch, then stop"
def __init__(self,pct=0.01,short_valid=True): self.pct,self.short_valid = pct,short_valid
def after_batch(self):
if self.iter/self.n_iter < self.pct: return
if self.training: raise CancelTrainException
if self.short_valid: raise CancelValidException
# Cell
class GradientAccumulation(Callback):
"Accumulate gradients before updating weights"
order,run_valid = MixedPrecision.order-4,False
def __init__(self, n_acc=32): store_attr()
def before_fit(self): self.count=0
def after_loss(self): self.learn.loss_grad /= self.n_acc/find_bs(self.learn.yb)
def before_step(self):
"Skip weight update if we have not seen enough items"
self.learn.loss_grad *= self.n_acc/find_bs(self.learn.yb) # log correct loss
self.count += find_bs(self.learn.yb)
if self.count<self.n_acc: raise CancelBatchException() # skip step/zero_grad
else: self.count=0
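# Usage sketch (illustrative): with a per-batch size of 8 and `n_acc=32`, the optimizer
# steps once every 4 batches, giving an effective batch size of ~32 items:
#
#   learn.fit_one_cycle(1, cbs=GradientAccumulation(n_acc=32))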
# Cell
class GradientClip(Callback):
"Clip norm of gradients"
order=MixedPrecision.order+1
def __init__(self,max_norm:float=1., norm_type:float=2.0): store_attr()
def before_step(self): nn.utils.clip_grad_norm_(self.parameters(), self.max_norm, self.norm_type)
# Cell
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
def set_bn_eval(m:nn.Module, use_eval=True)->None:
"Set bn layers in eval mode for all recursive children of `m`."
for l in m.children():
if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
if use_eval: l.eval()
else: l.train()
set_bn_eval(l)
class BnFreeze(Callback):
run_after=TrainEvalCallback
"Freeze moving average statistics in all non-trainable batchnorm layers."
def before_train(self):
set_bn_eval(self.model)
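# Usage sketch (illustrative): when fine-tuning a frozen, pretrained backbone, `BnFreeze`
# keeps the running statistics of its non-trainable batchnorm layers fixed:
#
#   learn.freeze()
#   learn.fit_one_cycle(1, cbs=BnFreeze())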
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/training.py | training.py |
from .core import *
from .data import *
from .fp16 import *
from .hook import *
from .mixup import *
from .progress import *
from .schedule import *
from .tracker import *
from .rnn import *
from .training import *
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/all.py | all.py |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/70_callback.wandb.ipynb (unless otherwise specified).
__all__ = ['WandbCallback', 'log_dataset', 'log_model', 'wandb_process']
# Cell
from ..basics import *
from .progress import *
from ..text.data import TensorText
from ..tabular.all import TabularDataLoaders, Tabular
from .hook import total_params
# Cell
import wandb
# Cell
class WandbCallback(Callback):
"Saves model topology, losses & metrics"
toward_end,remove_on_fetch,run_after = True,True,FetchPredsCallback
# Record if watch has been called previously (even in another instance)
_wandb_watch_called = False
def __init__(self, log="gradients", log_preds=True, log_model=True, log_dataset=False, dataset_name=None, valid_dl=None, n_preds=36, seed=12345, reorder=True):
# Check if wandb.init has been called
if wandb.run is None:
raise ValueError('You must call wandb.init() before WandbCallback()')
# W&B log step
self._wandb_step = wandb.run.step - 1 # -1 except if the run has previously logged data (incremented at each batch)
self._wandb_epoch = 0 if not(wandb.run.step) else math.ceil(wandb.run.summary['epoch']) # continue to next epoch
store_attr('log,log_preds,log_model,log_dataset,dataset_name,valid_dl,n_preds,seed,reorder')
def before_fit(self):
"Call watch method to log model topology, gradients & weights"
self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0
if not self.run: return
# Log config parameters
log_config = self.learn.gather_args()
_format_config(log_config)
try:
wandb.config.update(log_config, allow_val_change=True)
except Exception as e:
print(f'WandbCallback could not log config parameters -> {e}')
if not WandbCallback._wandb_watch_called:
WandbCallback._wandb_watch_called = True
# Logs model topology and optionally gradients and weights
wandb.watch(self.learn.model, log=self.log)
# log dataset
assert isinstance(self.log_dataset, (str, Path, bool)), 'log_dataset must be a path or a boolean'
if self.log_dataset is True:
if Path(self.dls.path) == Path('.'):
print('WandbCallback could not retrieve the dataset path, please provide it explicitly to "log_dataset"')
self.log_dataset = False
else:
self.log_dataset = self.dls.path
if self.log_dataset:
self.log_dataset = Path(self.log_dataset)
assert self.log_dataset.is_dir(), f'log_dataset must be a valid directory: {self.log_dataset}'
metadata = {'path relative to learner': os.path.relpath(self.log_dataset, self.learn.path)}
log_dataset(path=self.log_dataset, name=self.dataset_name, metadata=metadata)
# log model
if self.log_model and not hasattr(self, 'save_model'):
print('WandbCallback requires use of "SaveModelCallback" to log best model')
self.log_model = False
if self.log_preds:
try:
if not self.valid_dl:
#Initializes the batch watched
wandbRandom = random.Random(self.seed) # For repeatability
self.n_preds = min(self.n_preds, len(self.dls.valid_ds))
idxs = wandbRandom.sample(range(len(self.dls.valid_ds)), self.n_preds)
if isinstance(self.dls, TabularDataLoaders):
test_items = getattr(self.dls.valid_ds.items, 'iloc', self.dls.valid_ds.items)[idxs]
self.valid_dl = self.dls.test_dl(test_items, with_labels=True, process=False)
else:
test_items = [getattr(self.dls.valid_ds.items, 'iloc', self.dls.valid_ds.items)[i] for i in idxs]
self.valid_dl = self.dls.test_dl(test_items, with_labels=True)
self.learn.add_cb(FetchPredsCallback(dl=self.valid_dl, with_input=True, with_decoded=True, reorder=self.reorder))
except Exception as e:
self.log_preds = False
print(f'WandbCallback was not able to prepare a DataLoader for logging prediction samples -> {e}')
def after_batch(self):
"Log hyper-parameters and training loss"
if self.training:
self._wandb_step += 1
self._wandb_epoch += 1/self.n_iter
hypers = {f'{k}_{i}':v for i,h in enumerate(self.opt.hypers) for k,v in h.items()}
wandb.log({'epoch': self._wandb_epoch, 'train_loss': to_detach(self.smooth_loss.clone()), 'raw_loss': to_detach(self.loss.clone()), **hypers}, step=self._wandb_step)
def log_predictions(self, preds):
inp,preds,targs,out = preds
b = tuplify(inp) + tuplify(targs)
x,y,its,outs = self.valid_dl.show_results(b, out, show=False, max_n=self.n_preds)
wandb.log(wandb_process(x, y, its, outs), step=self._wandb_step)
def after_epoch(self):
"Log validation loss and custom metrics & log prediction samples"
# Correct any epoch rounding error and overwrite value
self._wandb_epoch = round(self._wandb_epoch)
wandb.log({'epoch': self._wandb_epoch}, step=self._wandb_step)
# Log sample predictions
if self.log_preds:
try:
self.log_predictions(self.learn.fetch_preds.preds)
except Exception as e:
self.log_preds = False
print(f'WandbCallback was not able to get prediction samples -> {e}')
wandb.log({n:s for n,s in zip(self.recorder.metric_names, self.recorder.log) if n not in ['train_loss', 'epoch', 'time']}, step=self._wandb_step)
def after_fit(self):
if self.log_model:
if self.save_model.last_saved_path is None:
print('WandbCallback could not retrieve a model to upload')
else:
metadata = {n:s for n,s in zip(self.recorder.metric_names, self.recorder.log) if n not in ['train_loss', 'epoch', 'time']}
log_model(self.save_model.last_saved_path, metadata=metadata)
self.run = True
if self.log_preds: self.remove_cb(FetchPredsCallback)
wandb.log({}) # ensure sync of last step
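# Usage sketch (illustrative): `wandb.init` must be called first, and `SaveModelCallback`
# is needed for best-model logging; the project name is an arbitrary example:
#
#   import wandb
#   wandb.init(project='my-project')
#   learn.fit_one_cycle(3, cbs=[WandbCallback(log_preds=True), SaveModelCallback()])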
# Cell
@patch
def gather_args(self:Learner):
"Gather config parameters accessible to the learner"
# args stored by `store_attr`
cb_args = {f'{cb}':getattr(cb,'__stored_args__',True) for cb in self.cbs}
args = {'Learner':self, **cb_args}
# input dimensions
try:
n_inp = self.dls.train.n_inp
args['n_inp'] = n_inp
xb = self.dls.train.one_batch()[:n_inp]
args.update({f'input {n+1} dim {i+1}':d for n in range(n_inp) for i,d in enumerate(list(detuplify(xb[n]).shape))})
except: print('Could not gather input dimensions')
# other useful information
with ignore_exceptions():
args['batch size'] = self.dls.bs
args['batch per epoch'] = len(self.dls.train)
args['model parameters'] = total_params(self.model)[0]
args['device'] = self.dls.device.type
args['frozen'] = bool(self.opt.frozen_idx)
args['frozen idx'] = self.opt.frozen_idx
args['dataset.tfms'] = f'{self.dls.dataset.tfms}'
args['dls.after_item'] = f'{self.dls.after_item}'
args['dls.before_batch'] = f'{self.dls.before_batch}'
args['dls.after_batch'] = f'{self.dls.after_batch}'
return args
# Cell
def _make_plt(img):
"Make plot to image resolution"
# from https://stackoverflow.com/a/13714915
my_dpi = 100
fig = plt.figure(frameon=False, dpi=my_dpi)
h, w = img.shape[:2]
fig.set_size_inches(w / my_dpi, h / my_dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
return fig, ax
# Cell
def _format_config_value(v):
if isinstance(v, list):
return [_format_config_value(item) for item in v]
elif hasattr(v, '__stored_args__'):
return {**_format_config(v.__stored_args__), '_name': v}
return v
# Cell
def _format_config(config):
"Format config parameters before logging them"
for k,v in config.items():
if isinstance(v, dict):
config[k] = _format_config(v)
else:
config[k] = _format_config_value(v)
return config
# Cell
def _format_metadata(metadata):
"Format metadata associated to artifacts"
for k,v in metadata.items(): metadata[k] = str(v)
# Cell
def log_dataset(path, name=None, metadata={}, description='raw dataset'):
"Log dataset folder"
# Check if wandb.init has been called in case datasets are logged manually
if wandb.run is None:
raise ValueError('You must call wandb.init() before log_dataset()')
path = Path(path)
if not path.is_dir():
raise ValueError(f'path must be a valid directory: {path}')
name = ifnone(name, path.name)
_format_metadata(metadata)
artifact_dataset = wandb.Artifact(name=name, type='dataset', metadata=metadata, description=description)
# log everything except "models" folder
for p in path.ls():
if p.is_dir():
if p.name != 'models': artifact_dataset.add_dir(str(p.resolve()), name=p.name)
else: artifact_dataset.add_file(str(p.resolve()))
wandb.run.use_artifact(artifact_dataset)
# Cell
def log_model(path, name=None, metadata={}, description='trained model'):
"Log model file"
if wandb.run is None:
raise ValueError('You must call wandb.init() before log_model()')
path = Path(path)
if not path.is_file():
raise ValueError(f'path must be a valid file: {path}')
name = ifnone(name, f'run-{wandb.run.id}-model')
_format_metadata(metadata)
artifact_model = wandb.Artifact(name=name, type='model', metadata=metadata, description=description)
with artifact_model.new_file(name, mode='wb') as fa:
fa.write(path.read_bytes())
wandb.run.log_artifact(artifact_model)
# Cell
@typedispatch
def wandb_process(x:TensorImage, y, samples, outs):
"Process `sample` and `out` depending on the type of `x/y`"
res_input, res_pred, res_label = [],[],[]
for s,o in zip(samples, outs):
img = s[0].permute(1,2,0)
res_input.append(wandb.Image(img, caption='Input data'))
for t, capt, res in ((o[0], "Prediction", res_pred), (s[1], "Ground Truth", res_label)):
fig, ax = _make_plt(img)
# Superimpose label or prediction to input image
ax = img.show(ctx=ax)
ax = t.show(ctx=ax)
res.append(wandb.Image(fig, caption=capt))
plt.close(fig)
return {"Inputs":res_input, "Predictions":res_pred, "Ground Truth":res_label}
# Cell
@typedispatch
def wandb_process(x:TensorImage, y:(TensorCategory,TensorMultiCategory), samples, outs):
return {"Prediction Samples": [wandb.Image(s[0].permute(1,2,0), caption=f'Ground Truth: {s[1]}\nPrediction: {o[0]}')
for s,o in zip(samples,outs)]}
# Cell
@typedispatch
def wandb_process(x:TensorImage, y:TensorMask, samples, outs):
res = []
codes = getattr(y, 'codes', None)
class_labels = {i:f'{c}' for i,c in enumerate(codes)} if codes is not None else None
for s,o in zip(samples, outs):
img = s[0].permute(1,2,0)
masks = {}
for t, capt in ((o[0], "Prediction"), (s[1], "Ground Truth")):
masks[capt] = {'mask_data':t.numpy().astype(np.uint8)}
if class_labels: masks[capt]['class_labels'] = class_labels
res.append(wandb.Image(img, masks=masks))
return {"Prediction Samples":res}
# Cell
@typedispatch
def wandb_process(x:TensorText, y:(TensorCategory,TensorMultiCategory), samples, outs):
data = [[s[0], s[1], o[0]] for s,o in zip(samples,outs)]
return {"Prediction Samples": wandb.Table(data=data, columns=["Text", "Target", "Prediction"])}
# Cell
@typedispatch
def wandb_process(x:Tabular, y:Tabular, samples, outs):
df = x.all_cols
for n in x.y_names: df[n+'_pred'] = y[n].values
return {"Prediction Samples": wandb.Table(dataframe=df)}
# Cell
#nbdev_comment _all_ = ['wandb_process']
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/wandb.py | wandb.py |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/17_callback.tracker.ipynb (unless otherwise specified).
__all__ = ['TerminateOnNaNCallback', 'TrackerCallback', 'EarlyStoppingCallback', 'SaveModelCallback',
'ReduceLROnPlateau']
# Cell
from ..basics import *
from .progress import *
from .fp16 import MixedPrecision
# Cell
class TerminateOnNaNCallback(Callback):
"A `Callback` that terminates training if loss is NaN."
order=-9
def after_batch(self):
"Test if `last_loss` is NaN and interrupts training."
if torch.isinf(self.loss) or torch.isnan(self.loss): raise CancelFitException
# Cell
class TrackerCallback(Callback):
"A `Callback` that keeps track of the best value in `monitor`."
order,remove_on_fetch = 60,True
def __init__(self, monitor='valid_loss', comp=None, min_delta=0., reset_on_fit=True):
if comp is None: comp = np.less if 'loss' in monitor or 'error' in monitor else np.greater
if comp == np.less: min_delta *= -1
self.monitor,self.comp,self.min_delta,self.reset_on_fit,self.best= monitor,comp,min_delta,reset_on_fit,None
def before_fit(self):
"Prepare the monitored value"
self.run = not hasattr(self, "lr_finder") and not hasattr(self, "gather_preds")
if self.reset_on_fit or self.best is None: self.best = float('inf') if self.comp == np.less else -float('inf')
assert self.monitor in self.recorder.metric_names[1:]
self.idx = list(self.recorder.metric_names[1:]).index(self.monitor)
def after_epoch(self):
"Compare the last value to the best up to now"
val = self.recorder.values[-1][self.idx]
if self.comp(val - self.min_delta, self.best): self.best,self.new_best = val,True
else: self.new_best = False
def after_fit(self): self.run=True
# Cell
class EarlyStoppingCallback(TrackerCallback):
"A `TrackerCallback` that terminates training when monitored quantity stops improving."
def __init__(self, monitor='valid_loss', comp=None, min_delta=0., patience=1, reset_on_fit=True):
super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
self.patience = patience
def before_fit(self): self.wait = 0; super().before_fit()
def after_epoch(self):
"Compare the value monitored to its best score and maybe stop training."
super().after_epoch()
if self.new_best: self.wait = 0
else:
self.wait += 1
if self.wait >= self.patience:
print(f'No improvement since epoch {self.epoch-self.wait}: early stopping')
raise CancelFitException()
# Cell
class SaveModelCallback(TrackerCallback):
"A `TrackerCallback` that saves the model's best during training and loads it at the end."
_only_train_loop = True
def __init__(self, monitor='valid_loss', comp=None, min_delta=0., fname='model', every_epoch=False,
with_opt=False, reset_on_fit=True):
super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
# keep track of file path for loggers
self.last_saved_path = None
store_attr('fname,every_epoch,with_opt')
def _save(self, name): self.last_saved_path = self.learn.save(name, with_opt=self.with_opt)
def after_epoch(self):
"Compare the value monitored to its best score and save if best."
if self.every_epoch: self._save(f'{self.fname}_{self.epoch}')
else: #every improvement
super().after_epoch()
if self.new_best:
print(f'Better model found at epoch {self.epoch} with {self.monitor} value: {self.best}.')
self._save(f'{self.fname}')
def after_fit(self, **kwargs):
"Load the best model."
if not self.every_epoch: self.learn.load(f'{self.fname}', with_opt=self.with_opt)
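# Usage sketch (illustrative): keep the best model by validation loss and stop early
# after 3 epochs without improvement:
#
#   cbs = [SaveModelCallback(monitor='valid_loss', fname='best'),
#          EarlyStoppingCallback(monitor='valid_loss', patience=3)]
#   learn.fit_one_cycle(20, cbs=cbs)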
# Cell
class ReduceLROnPlateau(TrackerCallback):
"A `TrackerCallback` that reduces learning rate when a metric has stopped improving."
def __init__(self, monitor='valid_loss', comp=None, min_delta=0., patience=1, factor=10., min_lr=0, reset_on_fit=True):
super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
self.patience,self.factor,self.min_lr = patience,factor,min_lr
def before_fit(self): self.wait = 0; super().before_fit()
def after_epoch(self):
"Compare the value monitored to its best score and reduce LR by `factor` if no improvement."
super().after_epoch()
if self.new_best: self.wait = 0
else:
self.wait += 1
if self.wait >= self.patience:
old_lr = self.opt.hypers[-1]['lr']
for h in self.opt.hypers: h['lr'] = max(h['lr'] / self.factor, self.min_lr)
self.wait = 0
if self.opt.hypers[-1]["lr"] < old_lr:
print(f'Epoch {self.epoch}: reducing lr to {self.opt.hypers[-1]["lr"]}')
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/tracker.py | tracker.py |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/15_callback.hook.ipynb (unless otherwise specified).
__all__ = ['Hook', 'hook_output', 'Hooks', 'hook_outputs', 'dummy_eval', 'model_sizes', 'num_features_model',
'has_params', 'HookCallback', 'total_params', 'layer_info', 'module_summary', 'ActivationStats']
# Cell
from ..basics import *
# Cell
@docs
class Hook():
"Create a hook on `m` with `hook_func`."
def __init__(self, m, hook_func, is_forward=True, detach=True, cpu=False, gather=False):
store_attr('hook_func,detach,cpu,gather')
f = m.register_forward_hook if is_forward else m.register_backward_hook
self.hook = f(self.hook_fn)
self.stored,self.removed = None,False
def hook_fn(self, module, input, output):
"Applies `hook_func` to `module`, `input`, `output`."
if self.detach:
input,output = to_detach(input, cpu=self.cpu, gather=self.gather),to_detach(output, cpu=self.cpu, gather=self.gather)
self.stored = self.hook_func(module, input, output)
def remove(self):
"Remove the hook from the model."
if not self.removed:
self.hook.remove()
self.removed=True
def __enter__(self, *args): return self
def __exit__(self, *args): self.remove()
_docs = dict(__enter__="Register the hook",
__exit__="Remove the hook")
# Cell
def _hook_inner(m,i,o): return o if isinstance(o,Tensor) or is_listy(o) else list(o)
def hook_output(module, detach=True, cpu=False, grad=False):
"Return a `Hook` that stores activations of `module` in `self.stored`"
return Hook(module, _hook_inner, detach=detach, cpu=cpu, is_forward=not grad)
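# Usage sketch (illustrative): capture the activations of one module for a single batch;
# `learn` and `xb` (an input batch) are assumed to exist, and `learn.model[0]` is just
# an example of a module to hook:
#
#   with hook_output(learn.model[0]) as h:
#       _ = learn.model.eval()(xb)
#       acts = h.stored          # detached activations stored by the hook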
# Cell
@docs
class Hooks():
"Create several hooks on the modules in `ms` with `hook_func`."
def __init__(self, ms, hook_func, is_forward=True, detach=True, cpu=False):
self.hooks = [Hook(m, hook_func, is_forward, detach, cpu) for m in ms]
def __getitem__(self,i): return self.hooks[i]
def __len__(self): return len(self.hooks)
def __iter__(self): return iter(self.hooks)
@property
def stored(self): return L(o.stored for o in self)
def remove(self):
"Remove the hooks from the model."
for h in self.hooks: h.remove()
def __enter__(self, *args): return self
def __exit__ (self, *args): self.remove()
_docs = dict(stored = "The states saved in each hook.",
__enter__="Register the hooks",
__exit__="Remove the hooks")
# Cell
def hook_outputs(modules, detach=True, cpu=False, grad=False):
"Return `Hooks` that store activations of all `modules` in `self.stored`"
return Hooks(modules, _hook_inner, detach=detach, cpu=cpu, is_forward=not grad)
# Cell
def dummy_eval(m, size=(64,64)):
"Evaluate `m` on a dummy input of a certain `size`"
ch_in = in_channels(m)
x = one_param(m).new(1, ch_in, *size).requires_grad_(False).uniform_(-1.,1.)
with torch.no_grad(): return m.eval()(x)
# Cell
def model_sizes(m, size=(64,64)):
"Pass a dummy input through the model `m` to get the various sizes of activations."
with hook_outputs(m) as hooks:
_ = dummy_eval(m, size=size)
return [o.stored.shape for o in hooks]
# Cell
def num_features_model(m):
"Return the number of output features for `m`."
sz,ch_in = 32,in_channels(m)
while True:
#Trying for a few sizes in case the model requires a big input size.
try:
return model_sizes(m, (sz,sz))[-1][1]
except Exception as e:
sz *= 2
if sz > 2048: raise e
# Cell
def has_params(m):
"Check if `m` has at least one parameter"
return len(list(m.parameters())) > 0
# Cell
@funcs_kwargs
class HookCallback(Callback):
"`Callback` that can be used to register hooks on `modules`"
_methods = ["hook"]
hook = noops
def __init__(self, modules=None, every=None, remove_end=True, is_forward=True, detach=True, cpu=True, **kwargs):
store_attr('modules,every,remove_end,is_forward,detach,cpu')
assert not kwargs
def before_fit(self):
"Register the `Hooks` on `self.modules`."
if self.modules is None: self.modules = [m for m in flatten_model(self.model) if has_params(m)]
if self.every is None: self._register()
def before_batch(self):
if self.every is None: return
if self.training and self.train_iter%self.every==0: self._register()
def after_batch(self):
if self.every is None: return
if self.training and self.train_iter%self.every==0: self._remove()
def after_fit(self):
"Remove the `Hooks`."
if self.remove_end: self._remove()
def _register(self): self.hooks = Hooks(self.modules, self.hook, self.is_forward, self.detach, self.cpu)
def _remove(self):
if getattr(self, 'hooks', None): self.hooks.remove()
def __del__(self): self._remove()
# Cell
def total_params(m):
"Give the number of parameters of a module and if it's trainable or not"
params = sum([p.numel() for p in m.parameters()])
trains = [p.requires_grad for p in m.parameters()]
return params, (False if len(trains)==0 else trains[0])
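# Worked example (illustrative): a `nn.Linear(10, 2)` has 10*2 weights + 2 biases:
#
#   total_params(nn.Linear(10, 2))   # -> (22, True)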
# Cell
def layer_info(learn, *xb):
"Return layer infos of `model` on `xb` (only support batch first inputs)"
def _track(m, i, o):
params, trainable, shape = '', '', ''
same = any((x[0].shape[1:] == x[1].shape for x in zip(i, o)))
if hasattr(m, 'weight'): # non activation layer
params, trainable = total_params(m)
shape = apply(lambda x: x.shape, o)
return (type(m).__name__, params, trainable, shape, same)
with Hooks(flatten_model(learn.model), _track) as h:
batch = apply(lambda o:o[:1], xb)
train_only_cbs = [cb for cb in learn.cbs if hasattr(cb, '_only_train_loop')]
with learn.removed_cbs(train_only_cbs), learn.no_logging(), learn as l:
r = l.get_preds(dl=[batch], inner=True, reorder=False)
return h.stored
# Cell
def _print_shapes(o, bs):
if isinstance(o, (torch.Size,tuple)): return ' x '.join([str(bs)] + [str(t) for t in o[1:]])
else: return str([_print_shapes(x, bs) for x in o])
# Cell
def module_summary(learn, *xb):
"Print a summary of `model` using `xb`"
#Individual parameters wrapped in ParameterModule aren't called through the hooks in `layer_info`,
# thus are not counted inside the summary
#TODO: find a way to have them counted in param number somehow
infos = layer_info(learn, *xb)
n,bs = 76,find_bs(xb)
inp_sz = _print_shapes(apply(lambda x:x.shape, xb), bs)
res = f"{type(learn.model).__name__} (Input shape: {inp_sz})\n"
res += "=" * n + "\n"
res += f"{'Layer (type)':<20} {'Output Shape':<20} {'Param #':<10} {'Trainable':<10}\n"
res += "=" * n
ps,trn_ps,j = 0,0,0
infos = [o for o in infos if o is not None] #see comment in previous cell
prev_sz = None
for typ,np,trn,sz,chnged in infos:
if sz is None: continue
if j == 0:
res += f'\n{"":<20} {_print_shapes(sz, bs)[:19]:<20}' # to avoid a double line at the top
if not chnged and not prev_sz == sz and j > 0: res += "\n" + "_" * n + "\n" + f'{"":<20} {_print_shapes(sz, bs)[:19]:<20}'
j = 1
res += f"\n{typ:<20} {'':<20} {np:<10} {str(trn):<10}"
if np != '':
ps += np
if trn: trn_ps += np
prev_sz = sz
res += "\n" + "_" * n + "\n"
res += f"\nTotal params: {ps:,}\n"
res += f"Total trainable params: {trn_ps:,}\n"
res += f"Total non-trainable params: {ps - trn_ps:,}\n\n"
return PrettyString(res)
# Cell
@patch
def summary(self:Learner):
"Print a summary of the model, optimizer and loss function."
xb = self.dls.train.one_batch()[:self.dls.train.n_inp]
res = module_summary(self, *xb)
res += f"Optimizer used: {self.opt_func}\nLoss function: {self.loss_func}\n\n"
if self.opt is not None:
res += f"Model " + ("unfrozen\n\n" if self.opt.frozen_idx==0 else f"frozen up to parameter group #{self.opt.frozen_idx}\n\n")
res += "Callbacks:\n" + '\n'.join(f" - {cb}" for cb in self.cbs.sorted('order'))
return PrettyString(res)
# Cell
@delegates()
class ActivationStats(HookCallback):
"Callback that record the mean and std of activations."
order=-20
def __init__(self, with_hist=False, **kwargs):
super().__init__(**kwargs)
self.with_hist = with_hist
def before_fit(self):
"Initialize stats."
super().before_fit()
self.stats = L()
def hook(self, m, i, o):
if isinstance(o, tuple): return self.hook_multi_ouput(o)
o = o.float()
res = {'mean': o.mean().item(), 'std': o.std().item(),
'near_zero': (o<=0.05).long().sum().item()/o.numel()}
if self.with_hist: res['hist'] = o.histc(40,0,10)
return res
def hook_multi_ouput(self,o_tuple):
"For outputs of RNN which are [nested] tuples of tensors"
res = []
for o in self._flatten_tuple(o_tuple):
if not(isinstance(o, Tensor)): continue
res.append(self.hook(None, None, o))
return res
def _flatten_tuple(self, o_tuple):
"Recursively flatten a [nested] tuple"
res = []
for it in o_tuple:
if isinstance(it, tuple): res += self._flatten_tuple(it)
else: res += [it]
return tuple(res)
def after_batch(self):
"Take the stored results and puts it in `self.stats`"
if self.training and (self.every is None or self.train_iter%self.every == 0):
self.stats.append(self.hooks.stored)
super().after_batch()
def layer_stats(self, idx):
lstats = self.stats.itemgot(idx)
return L(lstats.itemgot(o) for o in ('mean','std','near_zero'))
def hist(self, idx):
res = self.stats.itemgot(idx).itemgot('hist')
return torch.stack(tuple(res)).t().float().log1p()
def color_dim(self, idx, figsize=(10,5), ax=None):
"The 'colorful dimension' plot"
res = self.hist(idx)
if ax is None: ax = subplots(figsize=figsize)[1][0]
ax.imshow(res, origin='lower')
ax.axis('off')
def plot_layer_stats(self, idx):
_,axs = subplots(1, 3, figsize=(12,3))
for o,ax,title in zip(self.layer_stats(idx),axs,('mean','std','% near zero')):
ax.plot(o)
ax.set_title(title)
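# Usage sketch (illustrative): record activation statistics while training, then plot the
# 'colorful dimension' histogram and the mean/std/near-zero curves for the last hooked module:
#
#   stats = ActivationStats(with_hist=True)
#   learn.fit_one_cycle(1, cbs=stats)
#   stats.color_dim(-1)
#   stats.plot_layer_stats(-1)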
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/hook.py | hook.py |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/72_callback.neptune.ipynb (unless otherwise specified).
__all__ = ['NeptuneCallback']
# Cell
import tempfile
from ..basics import *
from ..learner import Callback
# Cell
import neptune
# Cell
class NeptuneCallback(Callback):
"Log losses, metrics, model weights, model architecture summary to neptune"
def __init__(self, log_model_weights=True, keep_experiment_running=False):
self.log_model_weights = log_model_weights
self.keep_experiment_running = keep_experiment_running
self.experiment = None
if neptune.project is None:
raise ValueError('You did not initialize project in neptune.\n',
'Please invoke `neptune.init("USERNAME/PROJECT_NAME")` before this callback.')
def before_fit(self):
try:
self.experiment = neptune.get_experiment()
except ValueError:
print('No active experiment. Please invoke `neptune.create_experiment()` before this callback.')
try:
self.experiment.set_property('n_epoch', str(self.learn.n_epoch))
self.experiment.set_property('model_class', str(type(self.learn.model)))
except:
print(f'Did not log all properties. Check properties in the {neptune.get_experiment()}.')
try:
with tempfile.NamedTemporaryFile(mode='w') as f:
with open(f.name, 'w') as g:
g.write(repr(self.learn.model))
self.experiment.log_artifact(f.name, 'model_summary.txt')
except:
print('Did not log model summary. Check if your model is PyTorch model.')
if self.log_model_weights and not hasattr(self.learn, 'save_model'):
print('Unable to log model to Neptune.\n',
'Use "SaveModelCallback" to save model checkpoints that will be logged to Neptune.')
def after_batch(self):
# log loss and opt.hypers
if self.learn.training:
self.experiment.log_metric('batch__smooth_loss', self.learn.smooth_loss)
self.experiment.log_metric('batch__loss', self.learn.loss)
self.experiment.log_metric('batch__train_iter', self.learn.train_iter)
for i, h in enumerate(self.learn.opt.hypers):
for k, v in h.items():
self.experiment.log_metric(f'batch__opt.hypers.{k}', v)
def after_epoch(self):
# log metrics
for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
if n not in ['epoch', 'time']:
self.experiment.log_metric(f'epoch__{n}', v)
if n == 'time':
self.experiment.log_text(f'epoch__{n}', str(v))
# log model weights
if self.log_model_weights and hasattr(self.learn, 'save_model'):
if self.learn.save_model.every_epoch:
_file = join_path_file(f'{self.learn.save_model.fname}_{self.learn.save_model.epoch}',
self.learn.path / self.learn.model_dir,
ext='.pth')
else:
_file = join_path_file(self.learn.save_model.fname,
self.learn.path / self.learn.model_dir,
ext='.pth')
self.experiment.log_artifact(_file)
def after_fit(self):
if not self.keep_experiment_running:
try:
self.experiment.stop()
except:
print('No neptune experiment to stop.')
else:
print(f'Your experiment (id: {self.experiment.id}, name: {self.experiment.name}) is left in the running state.\n',
'You can log more data to it, like this: `neptune.log_metric()`')
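# Usage sketch (illustrative): the project and experiment must be created first, exactly as
# the constructor and `before_fit` checks expect; the project name is an arbitrary example:
#
#   import neptune
#   neptune.init('USERNAME/PROJECT_NAME')
#   neptune.create_experiment()
#   learn.fit_one_cycle(3, cbs=NeptuneCallback(log_model_weights=False))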
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/neptune.py | neptune.py |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/18b_callback.preds.ipynb (unless otherwise specified).
__all__ = ['MCDropoutCallback']
# Cell
from ..basics import *
# Cell
class MCDropoutCallback(Callback):
def before_validate(self):
for m in [m for m in flatten_model(self.model) if 'dropout' in m.__class__.__name__.lower()]:
m.train()
def after_validate(self):
for m in [m for m in flatten_model(self.model) if 'dropout' in m.__class__.__name__.lower()]:
m.eval()
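# Usage sketch (illustrative): Monte-Carlo dropout at inference time by averaging several
# stochastic forward passes; 10 is an arbitrary number of samples:
#
#   all_preds = [learn.get_preds(cbs=[MCDropoutCallback()])[0] for _ in range(10)]
#   mean_preds = torch.stack(all_preds).mean(0)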
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/preds.py | preds.py |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/16_callback.progress.ipynb (unless otherwise specified).
__all__ = ['ProgressCallback', 'ShowGraphCallback', 'CSVLogger']
# Cell
from ..basics import *
# Cell
@docs
class ProgressCallback(Callback):
"A `Callback` to handle the display of progress bars"
order,_stateattrs = 60,('mbar','pbar')
def before_fit(self):
assert hasattr(self.learn, 'recorder')
if self.create_mbar: self.mbar = master_bar(list(range(self.n_epoch)))
if self.learn.logger != noop:
self.old_logger,self.learn.logger = self.logger,self._write_stats
self._write_stats(self.recorder.metric_names)
else: self.old_logger = noop
def before_epoch(self):
if getattr(self, 'mbar', False): self.mbar.update(self.epoch)
def before_train(self): self._launch_pbar()
def before_validate(self): self._launch_pbar()
def after_train(self): self.pbar.on_iter_end()
def after_validate(self): self.pbar.on_iter_end()
def after_batch(self):
self.pbar.update(self.iter+1)
if hasattr(self, 'smooth_loss'): self.pbar.comment = f'{self.smooth_loss:.4f}'
def _launch_pbar(self):
self.pbar = progress_bar(self.dl, parent=getattr(self, 'mbar', None), leave=False)
self.pbar.update(0)
def after_fit(self):
if getattr(self, 'mbar', False):
self.mbar.on_iter_end()
delattr(self, 'mbar')
if hasattr(self, 'old_logger'): self.learn.logger = self.old_logger
def _write_stats(self, log):
if getattr(self, 'mbar', False): self.mbar.write([f'{l:.6f}' if isinstance(l, float) else str(l) for l in log], table=True)
_docs = dict(before_fit="Setup the master bar over the epochs",
before_epoch="Update the master bar",
before_train="Launch a progress bar over the training dataloader",
before_validate="Launch a progress bar over the validation dataloader",
after_train="Close the progress bar over the training dataloader",
after_validate="Close the progress bar over the validation dataloader",
after_batch="Update the current progress bar",
after_fit="Close the master bar")
if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback, Recorder, ProgressCallback]
elif ProgressCallback not in defaults.callbacks: defaults.callbacks.append(ProgressCallback)
# Cell
@patch
@contextmanager
def no_bar(self:Learner):
"Context manager that deactivates the use of progress bars"
has_progress = hasattr(self, 'progress')
if has_progress: self.remove_cb(self.progress)
try: yield self
finally:
if has_progress: self.add_cb(ProgressCallback())
# Cell
class ShowGraphCallback(Callback):
"Update a graph of training and validation loss"
order,run_valid=65,False
def before_fit(self):
self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds")
if not(self.run): return
self.nb_batches = []
assert hasattr(self.learn, 'progress')
def after_train(self): self.nb_batches.append(self.train_iter)
def after_epoch(self):
"Plot validation loss in the pbar graph"
if not self.nb_batches: return
rec = self.learn.recorder
iters = range_of(rec.losses)
val_losses = [v[1] for v in rec.values]
x_bounds = (0, (self.n_epoch - len(self.nb_batches)) * self.nb_batches[0] + len(rec.losses))
y_bounds = (0, max((max(Tensor(rec.losses)), max(Tensor(val_losses)))))
self.progress.mbar.update_graph([(iters, rec.losses), (self.nb_batches, val_losses)], x_bounds, y_bounds)
# Cell
class CSVLogger(Callback):
"Log the results displayed in `learn.path/fname`"
order=60
def __init__(self, fname='history.csv', append=False):
self.fname,self.append = Path(fname),append
def read_log(self):
"Convenience method to quickly access the log."
return pd.read_csv(self.path/self.fname)
def before_fit(self):
"Prepare file with metric names."
if hasattr(self, "gather_preds"): return
self.path.parent.mkdir(parents=True, exist_ok=True)
self.file = (self.path/self.fname).open('a' if self.append else 'w')
self.file.write(','.join(self.recorder.metric_names) + '\n')
self.old_logger,self.learn.logger = self.logger,self._write_line
def _write_line(self, log):
"Write a line with `log` and call the old logger."
self.file.write(','.join([str(t) for t in log]) + '\n')
self.file.flush()
os.fsync(self.file.fileno())
self.old_logger(log)
def after_fit(self):
"Close the file and clean up."
if hasattr(self, "gather_preds"): return
self.file.close()
self.learn.logger = self.old_logger
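# Usage sketch (illustrative): log per-epoch metrics to `learn.path/'history.csv'` and read
# them back as a DataFrame; attached callbacks are exposed by their snake_case name:
#
#   learn.add_cb(CSVLogger(fname='history.csv'))
#   learn.fit_one_cycle(3)
#   df = learn.csv_logger.read_log()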
| zwyfastai | /zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/callback/progress.py | progress.py |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/13_callback.core.ipynb (unless otherwise specified).
__all__ = ['CancelStepException', 'CancelFitException', 'CancelEpochException', 'CancelTrainException',
'CancelValidException', 'CancelBatchException', 'event', 'Callback', 'TrainEvalCallback',
'GatherPredsCallback', 'FetchPredsCallback']
# Cell
from ..data.all import *
from ..optimizer import *
# Cell
#nbdev_comment _all_ = ['CancelStepException','CancelFitException','CancelEpochException','CancelTrainException','CancelValidException','CancelBatchException']
# Cell
_events = L.split('after_create before_fit before_epoch before_train before_batch after_pred after_loss \
before_backward before_step after_cancel_step after_step after_cancel_batch after_batch after_cancel_train \
after_train before_validate after_cancel_validate after_validate after_cancel_epoch \
after_epoch after_cancel_fit after_fit')
mk_class('event', **_events.map_dict(),
doc="All possible events as attributes to get tab-completion and typo-proofing")
# Cell
#nbdev_comment _all_ = ['event']
# Cell
_inner_loop = "before_batch after_pred after_loss before_backward before_step after_step after_cancel_batch after_batch".split()
# Cell
@funcs_kwargs(as_method=True)
class Callback(Stateful,GetAttr):
"Basic class handling tweaks of the training loop by changing a `Learner` in various events"
order,_default,learn,run,run_train,run_valid = 0,'learn',None,True,True,True
_methods = _events
def __init__(self, **kwargs): assert not kwargs, f'Passed unknown events: {kwargs}'
def __repr__(self): return type(self).__name__
def __call__(self, event_name):
"Call `self.{event_name}` if it's defined"
_run = (event_name not in _inner_loop or (self.run_train and getattr(self, 'training', True)) or
(self.run_valid and not getattr(self, 'training', False)))
res = None
if self.run and _run: res = getattr(self, event_name, noop)()
if event_name=='after_fit': self.run=True #Reset self.run to True at each end of fit
return res
def __setattr__(self, name, value):
if hasattr(self.learn,name):
warn(f"You are shadowing an attribute ({name}) that exists in the learner. Use `self.learn.{name}` to avoid this")
super().__setattr__(name, value)
@property
def name(self):
"Name of the `Callback`, camel-cased and with '*Callback*' removed"
return class2attr(self, 'Callback')
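# Minimal custom-callback sketch (illustrative): any method named after an event in `_events`
# is called at that point of the training loop, with the `Learner` reachable through `GetAttr`:
#
#   class PrintSmoothLoss(Callback):
#       def after_epoch(self): print(f'epoch {self.epoch}: {self.smooth_loss:.4f}')
#
#   learn.fit(2, cbs=PrintSmoothLoss())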
# Cell
class TrainEvalCallback(Callback):
"`Callback` that tracks the number of iterations done and properly sets training/eval mode"
order,run_valid = -10,False
def after_create(self): self.learn.n_epoch = 1
def before_fit(self):
"Set the iter and epoch counters to 0, put the model and the right device"
self.learn.epoch,self.learn.loss = 0,tensor(0.)
self.learn.train_iter,self.learn.pct_train = 0,0.
if hasattr(self.dls, 'device'): self.model.to(self.dls.device)
if hasattr(self.model, 'reset'): self.model.reset()
def after_batch(self):
"Update the iter counter (in training mode)"
self.learn.pct_train += 1./(self.n_iter*self.n_epoch)
self.learn.train_iter += 1
def before_train(self):
"Set the model in training mode"
self.learn.pct_train=self.epoch/self.n_epoch
self.model.train()
self.learn.training=True
def before_validate(self):
"Set the model in validation mode"
self.model.eval()
self.learn.training=False
# Cell
if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback]
# Cell
_ex_docs = dict(
CancelBatchException="Skip the rest of this batch and go to `after_batch`",
CancelTrainException="Skip the rest of the training part of the epoch and go to `after_train`",
CancelValidException="Skip the rest of the validation part of the epoch and go to `after_validate`",
CancelEpochException="Skip the rest of this epoch and go to `after_epoch`",
CancelStepException ="Skip stepping the optimizer",
CancelFitException ="Interrupts training and go to `after_fit`")
for c,d in _ex_docs.items(): mk_class(c,sup=Exception,doc=d)
# Cell
#TODO: save_targs and save_preds only handle preds/targets that have one tensor, not tuples of tensors.
class GatherPredsCallback(Callback):
"`Callback` that saves the predictions and targets, optionally `with_loss`"
_stateattrs=('preds','targets','inputs','losses')
def __init__(self, with_input=False, with_loss=False, save_preds=None, save_targs=None, concat_dim=0):
store_attr("with_input,with_loss,save_preds,save_targs,concat_dim")
def before_batch(self):
if self.with_input: self.inputs.append((self.learn.to_detach(self.xb)))
def before_validate(self):
"Initialize containers"
self.preds,self.targets = [],[]
if self.with_input: self.inputs = []
if self.with_loss: self.losses = []
def after_batch(self):
"Save predictions, targets and potentially losses"
if not hasattr(self, 'pred'): return
preds,targs = self.learn.to_detach(self.pred),self.learn.to_detach(self.yb)
if self.save_preds is None: self.preds.append(preds)
else: (self.save_preds/str(self.iter)).save_array(preds)
if self.save_targs is None: self.targets.append(targs)
else: (self.save_targs/str(self.iter)).save_array(targs[0])
if self.with_loss:
bs = find_bs(self.yb)
loss = self.loss if self.loss.numel() == bs else self.loss.view(bs,-1).mean(1)
self.losses.append(self.learn.to_detach(loss))
def after_validate(self):
"Concatenate all recorded tensors"
if not hasattr(self, 'preds'): return
if self.with_input: self.inputs = detuplify(to_concat(self.inputs, dim=self.concat_dim))
if not self.save_preds: self.preds = detuplify(to_concat(self.preds, dim=self.concat_dim))
if not self.save_targs: self.targets = detuplify(to_concat(self.targets, dim=self.concat_dim))
if self.with_loss: self.losses = to_concat(self.losses)
def all_tensors(self):
res = [None if self.save_preds else self.preds, None if self.save_targs else self.targets]
if self.with_input: res = [self.inputs] + res
if self.with_loss: res.append(self.losses)
return res
# Cell
class FetchPredsCallback(Callback):
"A callback to fetch predictions during the training loop"
remove_on_fetch = True
def __init__(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, cbs=None, reorder=True):
self.cbs = L(cbs)
store_attr('ds_idx,dl,with_input,with_decoded,reorder')
def after_validate(self):
to_rm = L(cb for cb in self.learn.cbs if getattr(cb, 'remove_on_fetch', False))
with self.learn.removed_cbs(to_rm + self.cbs) as learn:
self.preds = learn.get_preds(ds_idx=self.ds_idx, dl=self.dl,
with_input=self.with_input, with_decoded=self.with_decoded, inner=True, reorder=self.reorder)
# Source file: zwyfastai-2.0.21/src/fastai/callback/core.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/19_callback.mixup.ipynb (unless otherwise specified).
__all__ = ['reduce_loss', 'MixHandler', 'MixUp', 'CutMix']
# Cell
from ..basics import *
from torch.distributions.beta import Beta
# Cell
def reduce_loss(loss, reduction='mean'):
"Reduce the loss based on `reduction`"
return loss.mean() if reduction == 'mean' else loss.sum() if reduction == 'sum' else loss
# Cell
class MixHandler(Callback):
"A handler class for implementing `MixUp` style scheduling"
run_valid = False
def __init__(self, alpha=0.5):
self.distrib = Beta(tensor(alpha), tensor(alpha))
def before_fit(self):
self.stack_y = getattr(self.learn.loss_func, 'y_int', False)
        if self.stack_y: self.old_lf,self.learn.loss_func = self.learn.loss_func,self.lf
def after_fit(self):
if self.stack_y: self.learn.loss_func = self.old_lf
def lf(self, pred, *yb):
if not self.training: return self.old_lf(pred, *yb)
with NoneReduce(self.old_lf) as lf:
loss = torch.lerp(lf(pred,*self.yb1), lf(pred,*yb), self.lam)
return reduce_loss(loss, getattr(self.old_lf, 'reduction', 'mean'))
# Cell
class MixUp(MixHandler):
"Implementation of https://arxiv.org/abs/1710.09412"
def __init__(self, alpha=.4): super().__init__(alpha)
def before_batch(self):
lam = self.distrib.sample((self.y.size(0),)).squeeze().to(self.x.device)
lam = torch.stack([lam, 1-lam], 1)
self.lam = lam.max(1)[0]
shuffle = torch.randperm(self.y.size(0)).to(self.x.device)
xb1,self.yb1 = tuple(L(self.xb).itemgot(shuffle)),tuple(L(self.yb).itemgot(shuffle))
nx_dims = len(self.x.size())
self.learn.xb = tuple(L(xb1,self.xb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=nx_dims-1)))
if not self.stack_y:
ny_dims = len(self.y.size())
self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
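# Usage sketch (comments only; `dls`, `resnet18` and `accuracy` are assumed to come from the
# usual fastai.vision imports, and the learner/scheduler pieces are defined elsewhere):
#
#   learn = cnn_learner(dls, resnet18, metrics=accuracy)
#   learn.fit_one_cycle(5, cbs=MixUp(0.4))   # inputs are lerped each batch; the loss is mixed when `y_int` is set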
# Cell
class CutMix(MixHandler):
"Implementation of `https://arxiv.org/abs/1905.04899`"
def __init__(self, alpha=1.): super().__init__(alpha)
def before_batch(self):
bs, _, H, W = self.x.size()
self.lam = self.distrib.sample((1,))
shuffle = torch.randperm(bs)
xb1,self.yb1 = self.x[shuffle], tuple((self.y[shuffle],))
x1, y1, x2, y2 = self.rand_bbox(W, H, self.lam)
self.learn.xb[0][..., y1:y2, x1:x2] = xb1[..., y1:y2, x1:x2]
self.lam = (1 - ((x2-x1)*(y2-y1))/float(W*H)).item()
if not self.stack_y:
ny_dims = len(self.y.size())
self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
def rand_bbox(self, W, H, lam):
cut_rat = torch.sqrt(1. - lam)
cut_w = torch.round(W * cut_rat).type(torch.long)
cut_h = torch.round(H * cut_rat).type(torch.long)
# uniform
cx = torch.randint(0, W, (1,))
cy = torch.randint(0, H, (1,))
x1 = torch.clamp(cx - cut_w // 2, 0, W)
y1 = torch.clamp(cy - cut_h // 2, 0, H)
x2 = torch.clamp(cx + cut_w // 2, 0, W)
y2 = torch.clamp(cy + cut_h // 2, 0, H)
return x1, y1, x2, y2
# Source file: zwyfastai-2.0.21/src/fastai/callback/mixup.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/18_callback.fp16.ipynb (unless otherwise specified).
__all__ = ['MixedPrecision', 'FP16TestCallback', 'get_master', 'to_master_grads', 'to_model_params', 'test_overflow',
'grad_overflow', 'copy_clone', 'ModelToHalf', 'NonNativeMixedPrecision']
# Cell
from ..basics import *
from .progress import *
from torch.cuda.amp import GradScaler,autocast
from torch.cuda.amp.grad_scaler import OptState
# Cell
@delegates(GradScaler)
class MixedPrecision(Callback):
"Mixed precision training using Pytorch's `autocast` and `GradScaler`"
order = 10
def __init__(self, **kwargs): self.kwargs,self.autocast = kwargs,autocast()
def before_fit(self): self.learn.scaler,self.scales = GradScaler(**self.kwargs),L()
def before_batch(self): self.autocast.__enter__()
def after_pred(self):
if self.pred.dtype==torch.float16: self.learn.pred = to_float(self.pred)
def after_loss(self): self.autocast.__exit__()
def before_backward(self): self.learn.loss_grad = self.scaler.scale(self.loss_grad)
def before_step(self):
self.skipped=True
self.scaler.step(self)
if self.skipped: raise CancelStepException()
self.scales.append(self.scaler.get_scale())
def after_step(self): self.learn.scaler.update()
@property # pretend to be an optimizer for `GradScaler`
def param_groups(self): return self.opt.param_groups
def step(self, *args, **kwargs): self.skipped=False
# Cell
class FP16TestCallback(Callback):
"Asserts that predictions are `float16` values"
order = 9
def after_pred(self): assert self.pred.dtype==torch.float16
# Cell
@patch
@delegates(GradScaler)
def to_fp16(self:Learner, **kwargs): return self.add_cb(MixedPrecision(**kwargs))
# Cell
@patch
def to_fp32(self:Learner): return self.remove_cb(MixedPrecision)
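# Usage sketch (comments only): native mixed precision is toggled through the patched
# `Learner` methods above, e.g. (assuming an existing `learn` on a CUDA device):
#
#   learn = learn.to_fp16()    # adds MixedPrecision (autocast + GradScaler)
#   learn.fit_one_cycle(1)
#   learn = learn.to_fp32()    # removes the callback again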
# Cell
from ..fp16_utils import convert_network, model_grads_to_master_grads, master_params_to_model_params
# Cell
from torch.nn.utils import parameters_to_vector
# Cell
def get_master(opt, flat_master=False):
model_params = [[param for param in pg if getattr(param, 'requires_grad', False) and hasattr(param, 'data')] for pg in opt.param_lists]
if flat_master:
master_params = []
for pg in model_params:
mp = parameters_to_vector([param.data.float() for param in pg])
mp = nn.Parameter(mp, requires_grad=True)
if mp.grad is None: mp.grad = mp.new(*mp.size())
master_params.append([mp])
else:
master_params = [[nn.Parameter(param.data.clone().float().detach(), requires_grad=True) for param in pg] for pg in model_params]
return model_params, master_params
# Cell
def to_master_grads(model_pgs, master_pgs, flat_master=False):
for (model_params,master_params) in zip(model_pgs,master_pgs):
model_grads_to_master_grads(model_params, master_params, flat_master=flat_master)
# Cell
def to_model_params(model_pgs, master_pgs, flat_master=False)->None:
for (model_params,master_params) in zip(model_pgs,master_pgs):
master_params_to_model_params(model_params, master_params, flat_master=flat_master)
# Cell
def test_overflow(x):
s = float(x.float().sum())
return (s == float('inf') or s == float('-inf') or s != s)
# Cell
def grad_overflow(pgs):
for pg in pgs:
for p in pg:
if p.grad is not None and test_overflow(p.grad.data): return True
return False
# Cell
def copy_clone(d):
return {k:(v.detach().clone().float() if isinstance(v,Tensor) else v) for k,v in d.items()}
# Cell
def _copy_state(opt, pgs1, pgs2):
opt.param_lists = pgs2
for pg1,pg2 in zip(pgs1, pgs2):
for p1,p2 in zip(pg1, pg2): opt.state[p2] = copy_clone(opt.state.pop(p1, {}))
# Cell
class ModelToHalf(Callback):
"Use with NonNativeMixedPrecision callback (but it needs to run at the very beginning)"
order=-50
def before_fit(self): self.learn.model = convert_network(self.model, dtype=torch.float16)
def after_fit (self): self.learn.model = convert_network(self.model, dtype=torch.float32)
# Cell
@docs
class NonNativeMixedPrecision(Callback):
"Run training in mixed precision"
order=10
def __init__(self, loss_scale=512, flat_master=False, dynamic=True, max_loss_scale=2.**24,
div_factor=2., scale_wait=500, clip=None):
assert torch.backends.cudnn.enabled, "Mixed precision training requires cudnn."
self.flat_master,self.dynamic,self.max_loss_scale = flat_master,dynamic,max_loss_scale
self.div_factor,self.scale_wait,self.clip = div_factor,scale_wait,clip
self.loss_scale = max_loss_scale if dynamic else loss_scale
def before_fit(self):
assert self.dls.device.type == 'cuda', "Mixed-precision training requires a GPU, remove the call `to_fp16`"
if self.learn.opt is None: self.learn.create_opt()
self.model_pgs,self.master_pgs = get_master(self.opt, self.flat_master)
self.old_pgs = self.opt.param_lists
#Changes the optimizer so that the optimization step is done in FP32.
_copy_state(self.learn.opt, self.model_pgs, self.master_pgs)
if self.dynamic: self.count = 0
def before_batch(self): self.learn.xb = to_half(self.xb)
def after_pred(self): self.learn.pred = to_float(self.pred)
def before_backward(self): self.learn.loss_grad *= self.loss_scale
def before_step(self):
#First, check for an overflow
if self.dynamic and grad_overflow(self.model_pgs):
self.loss_scale /= self.div_factor
self.learn.loss_grad /= self.div_factor #to record correct loss
self.model.zero_grad()
raise CancelBatchException() #skip step and zero_grad
to_master_grads(self.model_pgs, self.master_pgs, self.flat_master)
for master_params in self.master_pgs:
for param in master_params:
if param.grad is not None: param.grad.div_(self.loss_scale)
if self.clip is not None:
for group in self.master_pgs: nn.utils.clip_grad_norm_(group, self.clip)
# Check if it's been long enough without overflow
if self.dynamic:
self.count += 1
if self.count == self.scale_wait:
self.count = 0
self.loss_scale *= self.div_factor
def after_step(self):
self.model.zero_grad() #Zero the gradients of the model manually (optimizer disconnected)
to_model_params(self.model_pgs, self.master_pgs, self.flat_master)
def after_batch(self):
if self.training: self.learn.loss_grad /= self.loss_scale #Log correct loss
def after_fit(self):
if not hasattr(self,'master_pgs'): return
_copy_state(self.learn.opt, self.master_pgs, self.model_pgs)
self.learn.opt.param_lists = self.old_pgs
delattr(self, "master_pgs")
delattr(self, "model_pgs")
delattr(self, "old_pgs")
_docs = dict(before_fit="Put the model in FP16 and prepare the two copies of the parameters",
before_batch="Put the input in FP16",
after_pred="Put the output back to FP32 so that the loss is computed in FP32",
before_backward="Apply loss scaling to avoid gradient underflow",
before_step="Copy the gradients to the master param and undo the loss scaling",
after_step="Copy the master params to the model params",
after_batch="Ensure loss is logged correctly",
after_fit="Put the model back in FP32")
# Cell
@patch
@delegates(NonNativeMixedPrecision.__init__)
def to_non_native_fp16(self:Learner, **kwargs): return self.add_cbs([ModelToHalf(), NonNativeMixedPrecision(**kwargs)])
# Cell
@patch
def to_non_native_fp32(self: Learner): return self.remove_cbs([ModelToHalf, NonNativeMixedPrecision])
# Source file: zwyfastai-2.0.21/src/fastai/callback/fp16.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/34_callback.rnn.ipynb (unless otherwise specified).
__all__ = ['ModelResetter', 'RNNCallback', 'RNNRegularizer', 'rnn_cbs']
# Cell
from ..basics import *
# Cell
@docs
class ModelResetter(Callback):
"`Callback` that resets the model at each validation/training step"
def before_train(self): self.model.reset()
def before_validate(self): self.model.reset()
def after_fit(self): self.model.reset()
_docs = dict(before_train="Reset the model before training",
before_validate="Reset the model before validation",
after_fit="Reset the model after fitting")
# Cell
class RNNCallback(Callback):
"Save the raw and dropped-out outputs and only keep the true output for loss computation"
def after_pred(self): self.learn.pred,self.raw_out,self.out = [o[-1] if is_listy(o) else o for o in self.pred]
# Cell
class RNNRegularizer(Callback):
"Add AR and TAR regularization"
order,run_valid = RNNCallback.order+1,False
def __init__(self, alpha=0., beta=0.): store_attr()
def after_loss(self):
if not self.training: return
if self.alpha: self.learn.loss_grad += self.alpha * self.rnn.out.float().pow(2).mean()
if self.beta:
h = self.rnn.raw_out
if len(h)>1: self.learn.loss_grad += self.beta * (h[:,1:] - h[:,:-1]).float().pow(2).mean()
# Cell
def rnn_cbs(alpha=0., beta=0.):
"All callbacks needed for (optionally regularized) RNN training"
reg = [RNNRegularizer(alpha=alpha, beta=beta)] if alpha or beta else []
return [ModelResetter(), RNNCallback()] + reg
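# Usage sketch (comments only; assumes `dls` and an AWD-LSTM style model exposing `reset`,
# `raw_out` and `out`, plus `CrossEntropyLossFlat` from the usual fastai imports):
#
#   learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(),
#                   cbs=rnn_cbs(alpha=2., beta=1.))   # AR/TAR weights are illustrative values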
# Source file: zwyfastai-2.0.21/src/fastai/callback/rnn.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/73_callback.captum.ipynb (unless otherwise specified).
__all__ = ['json_clean', 'CaptumInterpretation']
# Cell
import tempfile
from ..basics import *
# Cell
from ipykernel import jsonutil
# Cell
# Dirty hack as json_clean doesn't support CategoryMap type
_json_clean=jsonutil.json_clean
def json_clean(o):
o = list(o.items) if isinstance(o,CategoryMap) else o
return _json_clean(o)
jsonutil.json_clean = json_clean
# Cell
from captum.attr import IntegratedGradients,NoiseTunnel,GradientShap,Occlusion
from captum.attr import visualization as viz
from matplotlib.colors import LinearSegmentedColormap
from captum.insights import AttributionVisualizer, Batch
from captum.insights.attr_vis.features import ImageFeature
# Cell
class CaptumInterpretation():
"Captum Interpretation for Resnet"
def __init__(self,learn,cmap_name='custom blue',colors=None,N=256,methods=['original_image','heat_map'],signs=["all", "positive"],outlier_perc=1):
store_attr('learn,cmap_name,colors,N,methods,signs,outlier_perc')
self.colors = [(0, '#ffffff'),(0.25, '#000000'),(1, '#000000')] if self.colors is None else self.colors
self.dls=learn.dls
self.model=self.learn.model
self.supported_metrics=['IG','NT','Occl']
def get_baseline_img(self, img_tensor,baseline_type):
baseline_img=None
if baseline_type=='zeros': baseline_img= img_tensor*0
if baseline_type=='uniform': baseline_img= torch.rand(img_tensor.shape)
if baseline_type=='gauss':
baseline_img= (torch.rand(img_tensor.shape).to(self.dls.device)+img_tensor)/2
return baseline_img.to(self.dls.device)
def visualize(self,inp,metric='IG',n_steps=1000,baseline_type='zeros',nt_type='smoothgrad',strides = (3, 4, 4), sliding_window_shapes=(3,15, 15)):
if metric not in self.supported_metrics:
raise Exception(f"Metric {metric} is not supported. Currently {self.supported_metrics} are only supported")
tls = L([TfmdLists(inp, t) for t in L(ifnone(self.dls.tfms,[None]))])
inp_data=list(zip(*(tls[0],tls[1])))[0]
# Get Data
enc_data,dec_data=self._get_enc_dec_data(inp_data)
# Get Required Metrics
attributions=self._get_attributions(enc_data,metric,n_steps,nt_type,baseline_type,strides,sliding_window_shapes)
#Visualise the attributions
self._viz(attributions,dec_data,metric)
def _viz(self,attributions,dec_data,metric):
default_cmap = LinearSegmentedColormap.from_list(self.cmap_name,self.colors, N=self.N)
_ = viz.visualize_image_attr_multiple(np.transpose(attributions.squeeze().cpu().detach().numpy(), (1,2,0)),
np.transpose(dec_data[0].numpy(), (1,2,0)),
methods=self.methods,
cmap=default_cmap,
show_colorbar=True,
signs=self.signs,
outlier_perc=self.outlier_perc, titles=[f'Original Image - ({dec_data[1]})', metric])
def _get_enc_dec_data(self,inp_data):
dec_data=self.dls.after_item(inp_data)
enc_data=self.dls.after_batch(to_device(self.dls.before_batch(dec_data),self.dls.device))
return(enc_data,dec_data)
def _get_attributions(self,enc_data,metric,n_steps,nt_type,baseline_type,strides,sliding_window_shapes):
# Get Baseline
baseline=self.get_baseline_img(enc_data[0],baseline_type)
supported_metrics ={}
if metric == 'IG':
self._integrated_gradients = self._integrated_gradients if hasattr(self,'_integrated_gradients') else IntegratedGradients(self.model)
            return self._integrated_gradients.attribute(enc_data[0],baseline, target=enc_data[1], n_steps=n_steps)
elif metric == 'NT':
self._integrated_gradients = self._integrated_gradients if hasattr(self,'_integrated_gradients') else IntegratedGradients(self.model)
self._noise_tunnel= self._noise_tunnel if hasattr(self,'_noise_tunnel') else NoiseTunnel(self._integrated_gradients)
return self._noise_tunnel.attribute(enc_data[0].to(self.dls.device), n_samples=1, nt_type=nt_type, target=enc_data[1])
elif metric == 'Occl':
self._occlusion = self._occlusion if hasattr(self,'_occlusion') else Occlusion(self.model)
return self._occlusion.attribute(enc_data[0].to(self.dls.device),
strides = strides,
target=enc_data[1],
sliding_window_shapes=sliding_window_shapes,
baselines=baseline)
# Cell
@patch
def insights(x: CaptumInterpretation,inp_data,debug=True):
_baseline_func= lambda o: o*0
_get_vocab = lambda vocab: list(map(str,vocab)) if isinstance(vocab[0],bool) else vocab
dl = x.dls.test_dl(L(inp_data),with_labels=True, bs=4)
normalize_func= next((func for func in dl.after_batch if type(func)==Normalize),noop)
# captum v0.3 expects tensors without the batch dimension.
if hasattr(normalize_func, 'mean'):
if normalize_func.mean.ndim==4: normalize_func.mean.squeeze_(0)
if hasattr(normalize_func, 'std'):
if normalize_func.std.ndim==4: normalize_func.std.squeeze_(0)
visualizer = AttributionVisualizer(
models=[x.model],
score_func=lambda o: torch.nn.functional.softmax(o, 1),
classes=_get_vocab(dl.vocab),
features=[
ImageFeature(
"Image",
baseline_transforms=[_baseline_func],
input_transforms=[normalize_func],
)
],
dataset=x._formatted_data_iter(dl,normalize_func)
)
visualizer.render(debug=debug)
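# Usage sketch (comments only; assumes a trained vision `learn` and an image path `fn`,
# both hypothetical):
#
#   interp = CaptumInterpretation(learn)
#   interp.visualize(fn, metric='IG')     # Integrated Gradients attribution heat-map
#   interp.visualize(fn, metric='Occl')   # occlusion-based attribution
#   # interp.insights(fns)                # launches the Captum Insights widget on a list of items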
# Source file: zwyfastai-2.0.21/src/fastai/callback/captum.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/74_callback.cutmix.ipynb (unless otherwise specified).
__all__ = ['CutMix']
# Cell
from torch.distributions.beta import Beta
from ..vision.all import *
# Cell
class CutMix(Callback):
"Implementation of `https://arxiv.org/abs/1905.04899`"
run_after,run_valid = [Normalize],False
def __init__(self, alpha=1.): self.distrib = Beta(tensor(alpha), tensor(alpha))
def before_fit(self):
self.stack_y = getattr(self.learn.loss_func, 'y_int', False)
if self.stack_y: self.old_lf,self.learn.loss_func = self.learn.loss_func,self.lf
def after_fit(self):
if self.stack_y: self.learn.loss_func = self.old_lf
def before_batch(self):
W, H = self.xb[0].size(3), self.xb[0].size(2)
lam = self.distrib.sample((1,)).squeeze().to(self.x.device)
lam = torch.stack([lam, 1-lam])
self.lam = lam.max()
shuffle = torch.randperm(self.y.size(0)).to(self.x.device)
xb1,self.yb1 = tuple(L(self.xb).itemgot(shuffle)),tuple(L(self.yb).itemgot(shuffle))
nx_dims = len(self.x.size())
x1, y1, x2, y2 = self.rand_bbox(W, H, self.lam)
self.learn.xb[0][:, :, x1:x2, y1:y2] = xb1[0][:, :, x1:x2, y1:y2]
self.lam = (1 - ((x2-x1)*(y2-y1))/float(W*H)).item()
if not self.stack_y:
ny_dims = len(self.y.size())
self.learn.yb = tuple(L(self.yb1,self.yb).map_zip(torch.lerp,weight=unsqueeze(self.lam, n=ny_dims-1)))
def lf(self, pred, *yb):
if not self.training: return self.old_lf(pred, *yb)
with NoneReduce(self.old_lf) as lf:
loss = torch.lerp(lf(pred,*self.yb1), lf(pred,*yb), self.lam)
return reduce_loss(loss, getattr(self.old_lf, 'reduction', 'mean'))
def rand_bbox(self, W, H, lam):
cut_rat = torch.sqrt(1. - lam)
cut_w = (W * cut_rat).type(torch.long)
cut_h = (H * cut_rat).type(torch.long)
# uniform
cx = torch.randint(0, W, (1,)).to(self.x.device)
cy = torch.randint(0, H, (1,)).to(self.x.device)
x1 = torch.clamp(cx - cut_w // 2, 0, W)
y1 = torch.clamp(cy - cut_h // 2, 0, H)
x2 = torch.clamp(cx + cut_w // 2, 0, W)
y2 = torch.clamp(cy + cut_h // 2, 0, H)
return x1, y1, x2, y2
# Source file: zwyfastai-2.0.21/src/fastai/callback/cutmix.py
from .all import *
# Source file: zwyfastai-2.0.21/src/fastai/callback/__init__.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/60_medical.imaging.ipynb (unless otherwise specified).
__all__ = ['DcmDataset', 'DcmTag', 'DcmMultiValue', 'dcmread', 'get_dicom_files', 'DicomSegmentationDataLoaders',
           'TensorDicom', 'PILDicom', 'pixels', 'scaled_px', 'array_freqhist_bins', 'dicom_windows', 'TensorCTScan',
           'PILCTScan', 'uniform_blur2d', 'gauss_blur2d', 'mask2bbox', 'crop_resize', 'shape']
# Cell
from ..basics import *
from ..vision.all import *
from ..data.transforms import *
import pydicom,kornia,skimage
from scipy import ndimage    # used by `DcmDataset.zoom` below
from pydicom.dataset import Dataset as DcmDataset
from pydicom.tag import BaseTag as DcmTag
from pydicom.multival import MultiValue as DcmMultiValue
from PIL import Image
try:
import cv2
cv2.setNumThreads(0)
except: pass
# Cell
#nbdev_comment _all_ = ['DcmDataset', 'DcmTag', 'DcmMultiValue', 'dcmread', 'get_dicom_files', 'DicomSegmentationDataLoaders']
# Cell
def get_dicom_files(path, recurse=True, folders=None):
"Get dicom files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=[".dcm",".dicom"], recurse=recurse, folders=folders)
# Cell
@patch
def dcmread(fn:Path, force = False):
"Open a `DICOM` file"
return pydicom.dcmread(str(fn), force)
# Cell
class TensorDicom(TensorImage):
"Inherits from `TensorImage` and converts the `pixel_array` into a `TensorDicom`"
_show_args = {'cmap':'gray'}
# Cell
class PILDicom(PILBase):
_open_args,_tensor_cls,_show_args = {},TensorDicom,TensorDicom._show_args
@classmethod
def create(cls, fn:(Path,str,bytes), mode=None)->None:
"Open a `DICOM file` from path `fn` or bytes `fn` and load it as a `PIL Image`"
if isinstance(fn,bytes): im = Image.fromarray(pydicom.dcmread(pydicom.filebase.DicomBytesIO(fn)).pixel_array)
if isinstance(fn,(Path,str)): im = Image.fromarray(pydicom.dcmread(fn).pixel_array)
im.load()
im = im._new(im.im)
return cls(im.convert(mode) if mode else im)
PILDicom._tensor_cls = TensorDicom
# Cell
@patch
def png16read(self:Path): return array(Image.open(self), dtype=np.uint16)
# Cell
@patch(as_prop=True)
def pixels(self:DcmDataset):
"`pixel_array` as a tensor"
return tensor(self.pixel_array.astype(np.float32))
# Cell
@patch(as_prop=True)
def scaled_px(self:DcmDataset):
"`pixels` scaled by `RescaleSlope` and `RescaleIntercept`"
img = self.pixels
    if hasattr(self, 'RescaleSlope') and hasattr(self, 'RescaleIntercept'):
return img * self.RescaleSlope + self.RescaleIntercept
else: return img
# Cell
def array_freqhist_bins(self, n_bins=100):
"A numpy based function to split the range of pixel values into groups, such that each group has around the same number of pixels"
imsd = np.sort(self.flatten())
t = np.array([0.001])
t = np.append(t, np.arange(n_bins)/n_bins+(1/2/n_bins))
t = np.append(t, 0.999)
    t = (len(imsd)*t+0.5).astype(int)
return np.unique(imsd[t])
# Cell
@patch
def freqhist_bins(self:Tensor, n_bins=100):
"A function to split the range of pixel values into groups, such that each group has around the same number of pixels"
imsd = self.view(-1).sort()[0]
t = torch.cat([tensor([0.001]),
torch.arange(n_bins).float()/n_bins+(1/2/n_bins),
tensor([0.999])])
t = (len(imsd)*t).long()
return imsd[t].unique()
# Cell
@patch
def hist_scaled_pt(self:Tensor, brks=None):
# Pytorch-only version - switch to this if/when interp_1d can be optimized
if brks is None: brks = self.freqhist_bins()
brks = brks.to(self.device)
ys = torch.linspace(0., 1., len(brks)).to(self.device)
return self.flatten().interp_1d(brks, ys).reshape(self.shape).clamp(0.,1.)
# Cell
@patch
def hist_scaled(self:Tensor, brks=None):
"Scales a tensor using `freqhist_bins` to values between 0 and 1"
if self.device.type=='cuda': return self.hist_scaled_pt(brks)
if brks is None: brks = self.freqhist_bins()
ys = np.linspace(0., 1., len(brks))
x = self.numpy().flatten()
x = np.interp(x, brks.numpy(), ys)
return tensor(x).reshape(self.shape).clamp(0.,1.)
# Cell
@patch
def hist_scaled(self:DcmDataset, brks=None, min_px=None, max_px=None):
"Pixels scaled to a `min_px` and `max_px` value"
px = self.scaled_px
if min_px is not None: px[px<min_px] = min_px
if max_px is not None: px[px>max_px] = max_px
return px.hist_scaled(brks=brks)
# Cell
@patch
def windowed(self:Tensor, w, l):
"Scale pixel intensity by window width and window level"
px = self.clone()
px_min = l - w//2
px_max = l + w//2
px[px<px_min] = px_min
px[px>px_max] = px_max
return (px-px_min) / (px_max-px_min)
# Cell
@patch
def windowed(self:DcmDataset, w, l):
return self.scaled_px.windowed(w,l)
# Cell
# From https://radiopaedia.org/articles/windowing-ct
dicom_windows = types.SimpleNamespace(
brain=(80,40),
subdural=(254,100),
stroke=(8,32),
brain_bone=(2800,600),
brain_soft=(375,40),
lungs=(1500,-600),
mediastinum=(350,50),
abdomen_soft=(400,50),
liver=(150,30),
spine_soft=(250,50),
spine_bone=(1800,400)
)
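# Usage sketch (comments only; `fn` is an assumed path to a CT DICOM file):
#
#   dcm = fn.dcmread()
#   dcm.show(scale=dicom_windows.brain)               # display with window (width,level) = (80,40)
#   t = dcm.scaled_px.windowed(*dicom_windows.lungs)  # tensor rescaled to [0,1] in the lung window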
# Cell
class TensorCTScan(TensorImageBW):
"Inherits from `TensorImageBW` and converts the `pixel_array` into a `TensorCTScan`"
_show_args = {'cmap':'bone'}
# Cell
class PILCTScan(PILBase): _open_args,_tensor_cls,_show_args = {},TensorCTScan,TensorCTScan._show_args
# Cell
@patch
@delegates(show_image)
def show(self:DcmDataset, scale=True, cmap=plt.cm.bone, min_px=-1100, max_px=None, **kwargs):
"Display a normalized dicom image by default"
px = (self.windowed(*scale) if isinstance(scale,tuple)
else self.hist_scaled(min_px=min_px,max_px=max_px,brks=scale) if isinstance(scale,(ndarray,Tensor))
else self.hist_scaled(min_px=min_px,max_px=max_px) if scale
else self.scaled_px)
show_image(px, cmap=cmap, **kwargs)
# Cell
@patch
def show(self:DcmDataset, frames=1, scale=True, cmap=plt.cm.bone, min_px=-1100, max_px=None, **kwargs):
"Adds functionality to view dicom images where each file may have more than 1 frame"
px = (self.windowed(*scale) if isinstance(scale,tuple)
else self.hist_scaled(min_px=min_px,max_px=max_px,brks=scale) if isinstance(scale,(ndarray,Tensor))
else self.hist_scaled(min_px=min_px,max_px=max_px) if scale
else self.scaled_px)
if px.ndim > 2:
gh=[]
p = px.shape; print(f'{p[0]} frames per file')
for i in range(frames): u = px[i]; gh.append(u)
show_images(gh, **kwargs)
else: show_image(px, cmap=cmap, **kwargs)
# Cell
@patch
def pct_in_window(dcm:DcmDataset, w, l):
"% of pixels in the window `(w,l)`"
px = dcm.scaled_px
return ((px > l-w//2) & (px < l+w//2)).float().mean().item()
# Cell
def uniform_blur2d(x,s):
"Uniformly apply blurring"
w = x.new_ones(1,1,1,s)/s
# Factor 2d conv into 2 1d convs
x = unsqueeze(x, dim=0, n=4-x.dim())
r = (F.conv2d(x, w, padding=s//2))
r = (F.conv2d(r, w.transpose(-1,-2), padding=s//2)).cpu()[:,0]
return r.squeeze()
# Cell
def gauss_blur2d(x,s):
"Apply gaussian_blur2d kornia filter"
s2 = int(s/4)*2+1
x2 = unsqueeze(x, dim=0, n=4-x.dim())
res = kornia.filters.gaussian_blur2d(x2, (s2,s2), (s,s), 'replicate')
return res.squeeze()
# Cell
@patch
def mask_from_blur(x:Tensor, window, sigma=0.3, thresh=0.05, remove_max=True):
"Create a mask from the blurred image"
p = x.windowed(*window)
if remove_max: p[p==1] = 0
return gauss_blur2d(p, s=sigma*x.shape[-1])>thresh
# Cell
@patch
def mask_from_blur(x:DcmDataset, window, sigma=0.3, thresh=0.05, remove_max=True):
"Create a mask from the blurred image"
return to_device(x.scaled_px).mask_from_blur(window, sigma, thresh, remove_max=remove_max)
# Cell
def _px_bounds(x, dim):
c = x.sum(dim).nonzero().cpu()
idxs,vals = torch.unique(c[:,0],return_counts=True)
vs = torch.split_with_sizes(c[:,1],tuple(vals))
d = {k.item():v for k,v in zip(idxs,vs)}
default_u = tensor([0,x.shape[-1]-1])
b = [d.get(o,default_u) for o in range(x.shape[0])]
b = [tensor([o.min(),o.max()]) for o in b]
return torch.stack(b)
# Cell
def mask2bbox(mask):
no_batch = mask.dim()==2
if no_batch: mask = mask[None]
bb1 = _px_bounds(mask,-1).t()
bb2 = _px_bounds(mask,-2).t()
res = torch.stack([bb1,bb2],dim=1).to(mask.device)
return res[...,0] if no_batch else res
# Cell
def _bbs2sizes(crops, init_sz, use_square=True):
bb = crops.flip(1)
szs = (bb[1]-bb[0])
if use_square: szs = szs.max(0)[0][None].repeat((2,1))
overs = (szs+bb[0])>init_sz
bb[0][overs] = init_sz-szs[overs]
lows = (bb[0]/float(init_sz))
return lows,szs/float(init_sz)
# Cell
def crop_resize(x, crops, new_sz):
# NB assumes square inputs. Not tested for non-square anythings!
bs = x.shape[0]
lows,szs = _bbs2sizes(crops, x.shape[-1])
if not isinstance(new_sz,(list,tuple)): new_sz = (new_sz,new_sz)
id_mat = tensor([[1.,0,0],[0,1,0]])[None].repeat((bs,1,1)).to(x.device)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
sp = F.affine_grid(id_mat, (bs,1,*new_sz))+1.
grid = sp*unsqueeze(szs.t(),1,n=2)+unsqueeze(lows.t()*2.,1,n=2)
return F.grid_sample(x.unsqueeze(1), grid-1)
# Cell
@patch
def to_nchan(x:Tensor, wins, bins=None):
res = [x.windowed(*win) for win in wins]
if not isinstance(bins,int) or bins!=0: res.append(x.hist_scaled(bins).clamp(0,1))
dim = [0,1][x.dim()==3]
return TensorCTScan(torch.stack(res, dim=dim))
# Cell
@patch
def to_nchan(x:DcmDataset, wins, bins=None):
return x.scaled_px.to_nchan(wins, bins)
# Cell
@patch
def to_3chan(x:Tensor, win1, win2, bins=None):
return x.to_nchan([win1,win2],bins=bins)
# Cell
@patch
def to_3chan(x:DcmDataset, win1, win2, bins=None):
return x.scaled_px.to_3chan(win1, win2, bins)
# Cell
@patch
def save_jpg(x:(Tensor,DcmDataset), path, wins, bins=None, quality=90):
"Save tensor or dicom image into `jpg` format"
fn = Path(path).with_suffix('.jpg')
x = (x.to_nchan(wins, bins)*255).byte()
im = Image.fromarray(x.permute(1,2,0).numpy(), mode=['RGB','CMYK'][x.shape[0]==4])
im.save(fn, quality=quality)
# Cell
@patch
def to_uint16(x:(Tensor,DcmDataset), bins=None):
"Convert into a unit16 array"
    d = x.hist_scaled(bins).clamp(0,1) * (2**16-1)
return d.numpy().astype(np.uint16)
# Cell
@patch
def save_tif16(x:(Tensor,DcmDataset), path, bins=None, compress=True):
"Save tensor or dicom image into `tiff` format"
fn = Path(path).with_suffix('.tif')
Image.fromarray(x.to_uint16(bins)).save(str(fn), compression='tiff_deflate' if compress else None)
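# Usage sketch (comments only): exporting a multi-window view of a scan, where `dcm` is an
# assumed `DcmDataset` and `dest` an assumed writable directory:
#
#   wins = [dicom_windows.brain, dicom_windows.subdural]
#   dcm.save_jpg(dest/'scan.jpg', wins)   # 3 channels: two windows plus a hist-scaled channel
#   dcm.save_tif16(dest/'scan.tif')       # single-channel 16-bit tiff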
# Cell
@patch
def set_pixels(self:DcmDataset, px):
self.PixelData = px.tobytes()
self.Rows,self.Columns = px.shape
DcmDataset.pixel_array = property(DcmDataset.pixel_array.fget, set_pixels)
# Cell
@patch
def zoom(self:DcmDataset, ratio):
"Zoom image by specified ratio"
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
self.set_pixels(ndimage.zoom(self.pixel_array, ratio))
# Cell
@patch
def zoom_to(self:DcmDataset, sz):
"Change image size to specified pixel size"
if not isinstance(sz,(list,tuple)): sz=(sz,sz)
rows,cols = sz
self.zoom((rows/self.Rows,cols/self.Columns))
# Cell
@patch(as_prop=True)
def shape(self:DcmDataset):
"Returns the shape of a dicom image as rows and columns"
return self.Rows,self.Columns
# Cell
def _cast_dicom_special(x):
cls = type(x)
if not cls.__module__.startswith('pydicom'): return x
if cls.__base__ == object: return x
return cls.__base__(x)
def _split_elem(res,k,v):
if not isinstance(v,DcmMultiValue): return
res[f'Multi{k}'] = 1
for i,o in enumerate(v): res[f'{k}{"" if i==0 else i}']=o
# Cell
@patch
def as_dict(self:DcmDataset, px_summ=True, window=dicom_windows.brain):
"Convert the header of a dicom into a dictionary"
pxdata = (0x7fe0,0x0010)
vals = [self[o] for o in self.keys() if o != pxdata]
its = [(v.keyword,v.value) for v in vals]
res = dict(its)
res['fname'] = self.filename
for k,v in its: _split_elem(res,k,v)
if not px_summ: return res
stats = 'min','max','mean','std'
try:
pxs = self.pixel_array
for f in stats: res['img_'+f] = getattr(pxs,f)()
res['img_pct_window'] = self.pct_in_window(*window)
except Exception as e:
for f in stats: res['img_'+f] = 0
print(res,e)
for k in res: res[k] = _cast_dicom_special(res[k])
return res
# Cell
def _dcm2dict(fn, **kwargs): return fn.dcmread().as_dict(**kwargs)
# Cell
@delegates(parallel)
def _from_dicoms(cls, fns, n_workers=0, **kwargs):
return pd.DataFrame(parallel(_dcm2dict, fns, n_workers=n_workers, **kwargs))
pd.DataFrame.from_dicoms = classmethod(_from_dicoms)
# Cell
class DicomSegmentationDataLoaders(DataLoaders):
"Basic wrapper around DICOM `DataLoaders` with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock(cls=PILDicom), MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
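# Usage sketch (comments only; the paths, the mask naming scheme and the `Resize` transform
# are assumptions for illustration):
#
#   fnames = get_dicom_files(path/'images')
#   dls = DicomSegmentationDataLoaders.from_label_func(
#       path, fnames, lambda o: path/'masks'/f'{o.stem}.png',
#       codes=['background','lesion'], item_tfms=Resize(256), bs=8)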
# Source file: zwyfastai-2.0.21/src/fastai/medical/imaging.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/65_medical.text.ipynb (unless otherwise specified).
__all__ = []
# Cell
from ..basics import *
# Source file: zwyfastai-2.0.21/src/fastai/medical/text.py
from .imaging import *
from .text import *
# Source file: zwyfastai-2.0.21/src/fastai/medical/__init__.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/08_vision.data.ipynb (unless otherwise specified).
__all__ = ['get_grid', 'clip_remove_empty', 'bb_pad', 'ImageBlock', 'MaskBlock', 'PointBlock', 'BBoxBlock',
'BBoxLblBlock', 'ImageDataLoaders', 'SegmentationDataLoaders']
# Cell
from ..torch_basics import *
from ..data.all import *
from .core import *
# Cell
@delegates(subplots)
def get_grid(n, nrows=None, ncols=None, add_vert=0, figsize=None, double=False, title=None, return_fig=False, **kwargs):
"Return a grid of `n` axes, `rows` by `cols`"
nrows = nrows or int(math.sqrt(n))
ncols = ncols or int(np.ceil(n/nrows))
if double: ncols*=2 ; n*=2
fig,axs = subplots(nrows, ncols, figsize=figsize, **kwargs)
axs = [ax if i<n else ax.set_axis_off() for i, ax in enumerate(axs.flatten())][:n]
if title is not None: fig.suptitle(title, weight='bold', size=14)
return (fig,axs) if return_fig else axs
# Cell
def clip_remove_empty(bbox, label):
"Clip bounding boxes with image border and label background the empty ones"
bbox = torch.clamp(bbox, -1, 1)
empty = ((bbox[...,2] - bbox[...,0])*(bbox[...,3] - bbox[...,1]) <= 0.)
return (bbox[~empty], label[~empty])
# Cell
def bb_pad(samples, pad_idx=0):
"Function that collect `samples` of labelled bboxes and adds padding with `pad_idx`."
samples = [(s[0], *clip_remove_empty(*s[1:])) for s in samples]
max_len = max([len(s[2]) for s in samples])
def _f(img,bbox,lbl):
bbox = torch.cat([bbox,bbox.new_zeros(max_len-bbox.shape[0], 4)])
lbl = torch.cat([lbl, lbl .new_zeros(max_len-lbl .shape[0])+pad_idx])
return img,bbox,lbl
return [_f(*s) for s in samples]
# Cell
@typedispatch
def show_batch(x:TensorImage, y, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
# Cell
@typedispatch
def show_batch(x:TensorImage, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True)
for i in range(2):
ctxs[i::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::2],range(max_n))]
return ctxs
# Cell
def ImageBlock(cls=PILImage):
"A `TransformBlock` for images of `cls`"
return TransformBlock(type_tfms=cls.create, batch_tfms=IntToFloatTensor)
# Cell
def MaskBlock(codes=None):
"A `TransformBlock` for segmentation masks, potentially with `codes`"
return TransformBlock(type_tfms=PILMask.create, item_tfms=AddMaskCodes(codes=codes), batch_tfms=IntToFloatTensor)
# Cell
PointBlock = TransformBlock(type_tfms=TensorPoint.create, item_tfms=PointScaler)
BBoxBlock = TransformBlock(type_tfms=TensorBBox.create, item_tfms=PointScaler, dls_kwargs = {'before_batch': bb_pad})
PointBlock.__doc__ = "A `TransformBlock` for points in an image"
BBoxBlock.__doc__ = "A `TransformBlock` for bounding boxes in an image"
# Cell
def BBoxLblBlock(vocab=None, add_na=True):
"A `TransformBlock` for labeled bounding boxes, potentially with `vocab`"
return TransformBlock(type_tfms=MultiCategorize(vocab=vocab, add_na=add_na), item_tfms=BBoxLabeler)
# Cell
class ImageDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for computer vision problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, item_tfms=None,
batch_tfms=None, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
get_items = get_image_files if valid_pct else partial(get_image_files, folders=[train, valid])
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock(vocab=vocab)),
get_items=get_items,
splitter=splitter,
get_y=parent_label,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, path, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_path_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`"
dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, fnames, path=path, **kwargs)
@classmethod
def from_name_func(cls, path, fnames, label_func, **kwargs):
"Create from the name attrs of `fnames` in `path`s with `label_func`"
f = using_attr(label_func, 'name')
return cls.from_path_func(path, fnames, f, **kwargs)
@classmethod
def from_path_re(cls, path, fnames, pat, **kwargs):
"Create from list of `fnames` in `path`s with re expression `pat`"
return cls.from_path_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_name_re(cls, path, fnames, pat, **kwargs):
"Create from the name attrs of `fnames` in `path`s with re expression `pat`"
return cls.from_name_func(path, fnames, RegexLabeller(pat), **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, fn_col=0, folder=None, suff='', label_col=1, label_delim=None,
y_block=None, valid_col=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from `df` using `fn_col` and `label_col`"
pref = f'{Path(path) if folder is None else Path(path)/folder}{os.path.sep}'
if y_block is None:
is_multi = (is_listy(label_col) and len(label_col) > 1) or label_delim is not None
y_block = MultiCategoryBlock if is_multi else CategoryBlock
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=(ImageBlock, y_block),
get_x=ColReader(fn_col, pref=pref, suff=suff),
get_y=ColReader(label_col, label_delim=label_delim),
splitter=splitter,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, df, path=path, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `path/csv_fname` using `fn_col` and `label_col`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_lists(cls, path, fnames, labels, valid_pct=0.2, seed:int=None, y_block=None, item_tfms=None, batch_tfms=None,
**kwargs):
"Create from list of `fnames` and `labels` in `path`"
if y_block is None:
y_block = MultiCategoryBlock if is_listy(labels[0]) and len(labels[0]) > 1 else (
RegressionBlock if isinstance(labels[0], float) else CategoryBlock)
dblock = DataBlock.from_columns(blocks=(ImageBlock, y_block),
splitter=RandomSplitter(valid_pct, seed=seed),
item_tfms=item_tfms,
batch_tfms=batch_tfms)
return cls.from_dblock(dblock, (fnames, labels), path=path, **kwargs)
ImageDataLoaders.from_csv = delegates(to=ImageDataLoaders.from_df)(ImageDataLoaders.from_csv)
ImageDataLoaders.from_name_func = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_name_func)
ImageDataLoaders.from_path_re = delegates(to=ImageDataLoaders.from_path_func)(ImageDataLoaders.from_path_re)
ImageDataLoaders.from_name_re = delegates(to=ImageDataLoaders.from_name_func)(ImageDataLoaders.from_name_re)
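# Usage sketch (comments only; assumes an imagenet-style folder layout under `path`, and
# `Resize`/`aug_transforms` from fastai.vision.augment):
#
#   dls = ImageDataLoaders.from_folder(path, valid='valid', item_tfms=Resize(224),
#                                      batch_tfms=aug_transforms(), bs=64)
#   dls.show_batch(max_n=9)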
# Cell
class SegmentationDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for segmentation problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_label_func(cls, path, fnames, label_func, valid_pct=0.2, seed=None, codes=None, item_tfms=None, batch_tfms=None, **kwargs):
"Create from list of `fnames` in `path`s with `label_func`."
dblock = DataBlock(blocks=(ImageBlock, MaskBlock(codes=codes)),
splitter=RandomSplitter(valid_pct, seed=seed),
get_y=label_func,
item_tfms=item_tfms,
batch_tfms=batch_tfms)
res = cls.from_dblock(dblock, fnames, path=path, **kwargs)
return res
# Source file: zwyfastai-2.0.21/src/fastai/vision/data.py
from . import models
from ..basics import *
from ..callback.all import *
from .augment import *
from .core import *
from .data import *
from .models.all import *
from .learner import *
from .utils import *
# Source file: zwyfastai-2.0.21/src/fastai/vision/all.py
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/21_vision.learner.ipynb (unless otherwise specified).
__all__ = ['has_pool_type', 'create_body', 'create_head', 'default_split', 'model_meta', 'create_cnn_model',
           'cnn_learner', 'create_unet_model', 'unet_learner']
# Cell
from ..basics import *
from .core import *
from .data import *
from .augment import *
from . import models
# Cell
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
# Cell
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
# Cell
def _get_first_layer(m):
"Access first layer of a model"
c,p,n = m,None,None # child, parent, name
for n in next(m.named_parameters())[0].split('.')[:-1]:
p,c=c,getattr(c,n)
return c,p,n
# Cell
def _load_pretrained_weights(new_layer, previous_layer):
"Load pretrained weights based on number of input channels"
n_in = getattr(new_layer, 'in_channels')
if n_in==1:
# we take the sum
new_layer.weight.data = previous_layer.weight.data.sum(dim=1, keepdim=True)
elif n_in==2:
# we take first 2 channels + 50%
new_layer.weight.data = previous_layer.weight.data[:,:2] * 1.5
else:
# keep 3 channels weights and set others to null
new_layer.weight.data[:,:3] = previous_layer.weight.data
new_layer.weight.data[:,3:].zero_()
# Cell
def _update_first_layer(model, n_in, pretrained):
"Change first layer based on number of input channels"
if n_in == 3: return
first_layer, parent, name = _get_first_layer(model)
assert isinstance(first_layer, nn.Conv2d), f'Change of input channels only supported with Conv2d, found {first_layer.__class__.__name__}'
assert getattr(first_layer, 'in_channels') == 3, f'Unexpected number of input channels, found {getattr(first_layer, "in_channels")} while expecting 3'
params = {attr:getattr(first_layer, attr) for attr in 'out_channels kernel_size stride padding dilation groups padding_mode'.split()}
params['bias'] = getattr(first_layer, 'bias') is not None
params['in_channels'] = n_in
new_layer = nn.Conv2d(**params)
if pretrained:
_load_pretrained_weights(new_layer, first_layer)
setattr(parent, name, new_layer)
# Cell
def create_body(arch, n_in=3, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained=pretrained)
_update_first_layer(model, n_in, pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
# Cell
def create_head(nf, n_out, lin_ftrs=None, ps=0.5, concat_pool=True, bn_final=False, lin_first=False, y_range=None):
"Model head that takes `nf` features, runs through `lin_ftrs`, and out `n_out` classes."
lin_ftrs = [nf, 512, n_out] if lin_ftrs is None else [nf] + lin_ftrs + [n_out]
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
if lin_first: layers.append(nn.Dropout(ps.pop(0)))
for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
layers += LinBnDrop(ni, no, bn=True, p=p, act=actn, lin_first=lin_first)
if lin_first: layers.append(nn.Linear(lin_ftrs[-2], n_out))
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
if y_range is not None: layers.append(SigmoidRange(*y_range))
return nn.Sequential(*layers)
# Cell
from ..callback.hook import num_features_model
# Cell
def default_split(m):
"Default split of a model between body and head"
return L(m[0], m[1:]).map(params)
# Cell
def _xresnet_split(m): return L(m[0][:3], m[0][3:], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':default_split}
_xresnet_meta = {'cut':-4, 'split':_xresnet_split, 'stats':imagenet_stats}
_resnet_meta = {'cut':-2, 'split':_resnet_split, 'stats':imagenet_stats}
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split, 'stats':imagenet_stats}
_densenet_meta = {'cut':-1, 'split':_densenet_split, 'stats':imagenet_stats}
_vgg_meta = {'cut':-2, 'split':_vgg_split, 'stats':imagenet_stats}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split, 'stats':imagenet_stats}
# Cell
model_meta = {
models.xresnet.xresnet18 :{**_xresnet_meta}, models.xresnet.xresnet34: {**_xresnet_meta},
models.xresnet.xresnet50 :{**_xresnet_meta}, models.xresnet.xresnet101:{**_xresnet_meta},
models.xresnet.xresnet152:{**_xresnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
# Cell
@delegates(create_head)
def create_cnn_model(arch, n_out, pretrained=True, cut=None, n_in=3, init=nn.init.kaiming_normal_, custom_head=None,
concat_pool=True, **kwargs):
"Create custom convnet architecture"
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, n_in, pretrained, ifnone(cut, meta['cut']))
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children())) * (2 if concat_pool else 1)
head = create_head(nf, n_out, concat_pool=concat_pool, **kwargs)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
# Cell
def _add_norm(dls, meta, pretrained):
if not pretrained: return
after_batch = dls.after_batch
if first(o for o in after_batch.fs if isinstance(o,Normalize)): return
stats = meta.get('stats')
if stats is None: return
after_batch.add(Normalize.from_stats(*stats))
# Cell
@delegates(create_cnn_model)
def cnn_learner(dls, arch, normalize=True, n_out=None, pretrained=True, config=None,
# learner args
loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95),
# other model args
**kwargs):
"Build a convnet style learner from `dls` and `arch`"
if config:
warnings.warn('config param is deprecated. Pass your args directly to cnn_learner.')
kwargs = {**config, **kwargs}
meta = model_meta.get(arch, _default_meta)
if normalize: _add_norm(dls, meta, pretrained)
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
model = create_cnn_model(arch, n_out, pretrained=pretrained, **kwargs)
splitter=ifnone(splitter, meta['split'])
learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
moms=moms)
if pretrained: learn.freeze()
# keep track of args for loggers
store_attr('arch,normalize,n_out,pretrained', self=learn, **kwargs)
return learn
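# Usage sketch (comments only; assumes `dls` built with `ImageDataLoaders` and `error_rate`
# from fastai.metrics; `fine_tune` is defined with the schedulers elsewhere in the library):
#
#   learn = cnn_learner(dls, models.resnet34, metrics=error_rate)
#   learn.fine_tune(3)   # frozen warm-up epoch, then unfrozen training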
# Cell
@delegates(models.unet.DynamicUnet.__init__)
def create_unet_model(arch, n_out, img_size, pretrained=True, cut=None, n_in=3, **kwargs):
"Create custom unet architecture"
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, n_in, pretrained, ifnone(cut, meta['cut']))
model = models.unet.DynamicUnet(body, n_out, img_size, **kwargs)
return model
# Cell
@delegates(create_unet_model)
def unet_learner(dls, arch, normalize=True, n_out=None, pretrained=True, config=None,
# learner args
loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95),
# other model args
**kwargs):
"Build a unet learner from `dls` and `arch`"
if config:
warnings.warn('config param is deprecated. Pass your args directly to unet_learner.')
kwargs = {**config, **kwargs}
meta = model_meta.get(arch, _default_meta)
if normalize: _add_norm(dls, meta, pretrained)
n_out = ifnone(n_out, get_c(dls))
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
img_size = dls.one_batch()[0].shape[-2:]
assert img_size, "image size could not be inferred from data"
model = create_unet_model(arch, n_out, img_size, pretrained=pretrained, **kwargs)
splitter=ifnone(splitter, meta['split'])
learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
moms=moms)
if pretrained: learn.freeze()
# keep track of args for loggers
store_attr('arch,normalize,n_out,pretrained', self=learn)
if kwargs: store_attr(self=learn, **kwargs)
return learn
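# Usage sketch (comments only; assumes segmentation `dls` whose mask codes define `dls.c`):
#
#   learn = unet_learner(dls, models.resnet34)
#   learn.fine_tune(8, base_lr=1e-3)   # `fine_tune` assumed from the scheduler patches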
# Cell
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:(TensorMask, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=6,
nrows=None, ncols=1, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True,
title='Target/Prediction')
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(2*max_n))]
for o in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(o.itemgot(0),ctxs[1::2],range(2*max_n))]
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:TensorImage, samples, outs, ctxs=None, max_n=10, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(3*min(len(samples), max_n), ncols=3, figsize=figsize, title='Input/Target/Prediction')
for i in range(2):
ctxs[i::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::3],range(max_n))]
ctxs[2::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs[2::3],range(max_n))]
return ctxs
# Cell
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
# Cell
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (TitledStr(r), TitledFloat(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
# Cell
from ..basics import *
from .core import *
from .data import *
from .augment import *
from . import models
# Cell
def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
# Cell
def has_pool_type(m):
"Return `True` if `m` is a pooling layer or has one in its children"
if _is_pool_type(m): return True
for l in m.children():
if has_pool_type(l): return True
return False
# Cell
def _get_first_layer(m):
"Access first layer of a model"
c,p,n = m,None,None # child, parent, name
for n in next(m.named_parameters())[0].split('.')[:-1]:
p,c=c,getattr(c,n)
return c,p,n
# Cell
def _load_pretrained_weights(new_layer, previous_layer):
"Load pretrained weights based on number of input channels"
n_in = getattr(new_layer, 'in_channels')
if n_in==1:
# we take the sum
new_layer.weight.data = previous_layer.weight.data.sum(dim=1, keepdim=True)
elif n_in==2:
# we take first 2 channels + 50%
new_layer.weight.data = previous_layer.weight.data[:,:2] * 1.5
else:
# keep 3 channels weights and set others to null
new_layer.weight.data[:,:3] = previous_layer.weight.data
new_layer.weight.data[:,3:].zero_()
# Cell
def _update_first_layer(model, n_in, pretrained):
"Change first layer based on number of input channels"
if n_in == 3: return
first_layer, parent, name = _get_first_layer(model)
assert isinstance(first_layer, nn.Conv2d), f'Change of input channels only supported with Conv2d, found {first_layer.__class__.__name__}'
assert getattr(first_layer, 'in_channels') == 3, f'Unexpected number of input channels, found {getattr(first_layer, "in_channels")} while expecting 3'
params = {attr:getattr(first_layer, attr) for attr in 'out_channels kernel_size stride padding dilation groups padding_mode'.split()}
params['bias'] = getattr(first_layer, 'bias') is not None
params['in_channels'] = n_in
new_layer = nn.Conv2d(**params)
if pretrained:
_load_pretrained_weights(new_layer, first_layer)
setattr(parent, name, new_layer)
# Cell
def create_body(arch, n_in=3, pretrained=True, cut=None):
"Cut off the body of a typically pretrained `arch` as determined by `cut`"
model = arch(pretrained=pretrained)
_update_first_layer(model, n_in, pretrained)
#cut = ifnone(cut, cnn_config(arch)['cut'])
if cut is None:
ll = list(enumerate(model.children()))
cut = next(i for i,o in reversed(ll) if has_pool_type(o))
if isinstance(cut, int): return nn.Sequential(*list(model.children())[:cut])
elif callable(cut): return cut(model)
    else: raise NameError("cut must be either integer or a function")
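# A minimal usage sketch (assuming torchvision's resnet18 re-exported through `models` above):
# `create_body` cuts a backbone before its pooling/head layers and can rebuild the first conv
# for a different number of input channels.
def _demo_create_body():
    body_rgb  = create_body(models.resnet18, n_in=3, pretrained=False)   # standard 3-channel input
    body_gray = create_body(models.resnet18, n_in=1, pretrained=False)   # first conv rebuilt for 1 channel
    return body_rgb, body_gray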
# Cell
def create_head(nf, n_out, lin_ftrs=None, ps=0.5, concat_pool=True, first_bn=True, bn_final=False,
                lin_first=False, y_range=None):
    "Model head that takes `nf` features, runs through `lin_ftrs`, and ends with `n_out` output classes."
if concat_pool: nf *= 2
lin_ftrs = [nf, 512, n_out] if lin_ftrs is None else [nf] + lin_ftrs + [n_out]
bns = [first_bn] + [True]*len(lin_ftrs[1:])
ps = L(ps)
if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
layers = [pool, Flatten()]
if lin_first: layers.append(nn.Dropout(ps.pop(0)))
for ni,no,bn,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], bns, ps, actns):
layers += LinBnDrop(ni, no, bn=bn, p=p, act=actn, lin_first=lin_first)
if lin_first: layers.append(nn.Linear(lin_ftrs[-2], n_out))
if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
if y_range is not None: layers.append(SigmoidRange(*y_range))
return nn.Sequential(*layers)
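# A minimal sketch of what `create_head` builds (assuming a resnet-style body with 512 output
# channels; `nf` is doubled internally when `concat_pool=True`):
def _demo_create_head():
    head = create_head(512, 10)   # pool + flatten, then LinBnDrop 1024->512 (ReLU) and 512->10
    return head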
# Cell
from ..callback.hook import num_features_model
# Cell
def default_split(m):
"Default split of a model between body and head"
return L(m[0], m[1:]).map(params)
# Cell
def _xresnet_split(m): return L(m[0][:3], m[0][3:], m[1:]).map(params)
def _resnet_split(m): return L(m[0][:6], m[0][6:], m[1:]).map(params)
def _squeezenet_split(m:nn.Module): return L(m[0][0][:5], m[0][0][5:], m[1:]).map(params)
def _densenet_split(m:nn.Module): return L(m[0][0][:7],m[0][0][7:], m[1:]).map(params)
def _vgg_split(m:nn.Module): return L(m[0][0][:22], m[0][0][22:], m[1:]).map(params)
def _alexnet_split(m:nn.Module): return L(m[0][0][:6], m[0][0][6:], m[1:]).map(params)
_default_meta = {'cut':None, 'split':default_split}
_xresnet_meta = {'cut':-4, 'split':_xresnet_split, 'stats':imagenet_stats}
_resnet_meta = {'cut':-2, 'split':_resnet_split, 'stats':imagenet_stats}
_squeezenet_meta = {'cut':-1, 'split': _squeezenet_split, 'stats':imagenet_stats}
_densenet_meta = {'cut':-1, 'split':_densenet_split, 'stats':imagenet_stats}
_vgg_meta = {'cut':-2, 'split':_vgg_split, 'stats':imagenet_stats}
_alexnet_meta = {'cut':-2, 'split':_alexnet_split, 'stats':imagenet_stats}
# Cell
model_meta = {
models.xresnet.xresnet18 :{**_xresnet_meta}, models.xresnet.xresnet34: {**_xresnet_meta},
models.xresnet.xresnet50 :{**_xresnet_meta}, models.xresnet.xresnet101:{**_xresnet_meta},
models.xresnet.xresnet152:{**_xresnet_meta},
models.resnet18 :{**_resnet_meta}, models.resnet34: {**_resnet_meta},
models.resnet50 :{**_resnet_meta}, models.resnet101:{**_resnet_meta},
models.resnet152:{**_resnet_meta},
models.squeezenet1_0:{**_squeezenet_meta},
models.squeezenet1_1:{**_squeezenet_meta},
models.densenet121:{**_densenet_meta}, models.densenet169:{**_densenet_meta},
models.densenet201:{**_densenet_meta}, models.densenet161:{**_densenet_meta},
models.vgg11_bn:{**_vgg_meta}, models.vgg13_bn:{**_vgg_meta}, models.vgg16_bn:{**_vgg_meta}, models.vgg19_bn:{**_vgg_meta},
models.alexnet:{**_alexnet_meta}}
# Cell
@delegates(create_head)
def create_cnn_model(arch, n_out, pretrained=True, cut=None, n_in=3, init=nn.init.kaiming_normal_, custom_head=None,
concat_pool=True, **kwargs):
"Create custom convnet architecture"
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, n_in, pretrained, ifnone(cut, meta['cut']))
if custom_head is None:
nf = num_features_model(nn.Sequential(*body.children()))
head = create_head(nf, n_out, concat_pool=concat_pool, **kwargs)
else: head = custom_head
model = nn.Sequential(body, head)
if init is not None: apply_init(model[1], init)
return model
# Cell
def _add_norm(dls, meta, pretrained):
if not pretrained: return
after_batch = dls.after_batch
if first(o for o in after_batch.fs if isinstance(o,Normalize)): return
stats = meta.get('stats')
if stats is None: return
after_batch.add(Normalize.from_stats(*stats))
# Cell
@delegates(create_cnn_model)
def cnn_learner(dls, arch, normalize=True, n_out=None, pretrained=True, config=None,
# learner args
loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95),
# other model args
**kwargs):
"Build a convnet style learner from `dls` and `arch`"
if config:
warnings.warn('config param is deprecated. Pass your args directly to cnn_learner.')
kwargs = {**config, **kwargs}
meta = model_meta.get(arch, _default_meta)
if normalize: _add_norm(dls, meta, pretrained)
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
model = create_cnn_model(arch, n_out, pretrained=pretrained, **kwargs)
splitter=ifnone(splitter, meta['split'])
learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
moms=moms)
if pretrained: learn.freeze()
# keep track of args for loggers
store_attr('arch,normalize,n_out,pretrained', self=learn, **kwargs)
return learn
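# A minimal usage sketch for `cnn_learner` (assuming `path` points to an imagenet-style folder
# dataset and that the full vision namespace, e.g. `error_rate` and `fine_tune`, is available):
def _demo_cnn_learner(path):
    dls = ImageDataLoaders.from_folder(path, valid_pct=0.2, item_tfms=Resize(224))
    learn = cnn_learner(dls, models.resnet34, metrics=error_rate)
    learn.fine_tune(1)   # one frozen epoch on the head, then one epoch with the body unfrozen
    return learn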
# Cell
@delegates(models.unet.DynamicUnet.__init__)
def create_unet_model(arch, n_out, img_size, pretrained=True, cut=None, n_in=3, **kwargs):
"Create custom unet architecture"
meta = model_meta.get(arch, _default_meta)
body = create_body(arch, n_in, pretrained, ifnone(cut, meta['cut']))
model = models.unet.DynamicUnet(body, n_out, img_size, **kwargs)
return model
# Cell
@delegates(create_unet_model)
def unet_learner(dls, arch, normalize=True, n_out=None, pretrained=True, config=None,
# learner args
loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=None, cbs=None, metrics=None, path=None,
model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95,0.85,0.95),
# other model args
**kwargs):
"Build a unet learner from `dls` and `arch`"
if config:
warnings.warn('config param is deprecated. Pass your args directly to unet_learner.')
kwargs = {**config, **kwargs}
meta = model_meta.get(arch, _default_meta)
if normalize: _add_norm(dls, meta, pretrained)
n_out = ifnone(n_out, get_c(dls))
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
img_size = dls.one_batch()[0].shape[-2:]
assert img_size, "image size could not be inferred from data"
model = create_unet_model(arch, n_out, img_size, pretrained=pretrained, **kwargs)
splitter=ifnone(splitter, meta['split'])
learn = Learner(dls=dls, model=model, loss_func=loss_func, opt_func=opt_func, lr=lr, splitter=splitter, cbs=cbs,
metrics=metrics, path=path, model_dir=model_dir, wd=wd, wd_bn_bias=wd_bn_bias, train_bn=train_bn,
moms=moms)
if pretrained: learn.freeze()
# keep track of args for loggers
store_attr('arch,normalize,n_out,pretrained', self=learn, **kwargs)
return learn
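# A minimal usage sketch for `unet_learner` on a segmentation task (the CamVid-style label
# naming below is an assumption about the caller's data, not something this module requires):
def _demo_unet_learner(path, codes):
    fnames = get_image_files(path/'images')
    dls = SegmentationDataLoaders.from_label_func(
        path, bs=8, fnames=fnames, codes=codes,
        label_func=lambda o: path/'labels'/f'{o.stem}_P{o.suffix}')
    learn = unet_learner(dls, models.resnet34)
    learn.fine_tune(1)
    return learn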
# Cell
@typedispatch
def show_results(x:TensorImage, y, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:TensorCategory, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i in range(2):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [r.show(ctx=c, color='green' if b==r else 'red', **kwargs)
for b,r,c,_ in zip(samples.itemgot(1),outs.itemgot(0),ctxs,range(max_n))]
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:(TensorMask, TensorPoint, TensorBBox), samples, outs, ctxs=None, max_n=6,
nrows=None, ncols=1, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, double=True,
title='Target/Prediction')
for i in range(2):
ctxs[::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[::2],range(2*max_n))]
for o in [samples,outs]:
ctxs[1::2] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(o.itemgot(0),ctxs[1::2],range(2*max_n))]
return ctxs
# Cell
@typedispatch
def show_results(x:TensorImage, y:TensorImage, samples, outs, ctxs=None, max_n=10, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(3*min(len(samples), max_n), ncols=3, figsize=figsize, title='Input/Target/Prediction')
for i in range(2):
ctxs[i::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs[i::3],range(max_n))]
ctxs[2::3] = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs[2::3],range(max_n))]
return ctxs
# Cell
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize, title='Prediction/Actual/Loss/Probability')
for ax,s,o,r,l in zip(axs, samples, outs, raws, losses):
s[0].show(ctx=ax, **kwargs)
ax.set_title(f'{o[0]}/{s[1]} / {l.item():.2f} / {r.max().item():.2f}')
# Cell
@typedispatch
def plot_top_losses(x: TensorImage, y:TensorMultiCategory, samples, outs, raws, losses, nrows=None, ncols=None, figsize=None, **kwargs):
axs = get_grid(len(samples), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
for i,(ax,s) in enumerate(zip(axs, samples)): s[0].show(ctx=ax, title=f'Image {i}', **kwargs)
rows = get_empty_df(len(samples))
outs = L(s[1:] + o + (TitledStr(r), TitledFloat(l.item())) for s,o,r,l in zip(samples, outs, raws, losses))
for i,l in enumerate(["target", "predicted", "probabilities", "loss"]):
rows = [b.show(ctx=r, label=l, **kwargs) for b,r in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/learner.py
|
learner.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/09_vision.augment.ipynb (unless otherwise specified).
__all__ = ['RandTransform', 'TensorTypes', 'FlipItem', 'DihedralItem', 'PadMode', 'CropPad', 'RandomCrop',
'OldRandomCrop', 'ResizeMethod', 'Resize', 'RandomResizedCrop', 'RatioResize', 'affine_grid',
'AffineCoordTfm', 'RandomResizedCropGPU', 'mask_tensor', 'affine_mat', 'flip_mat', 'Flip',
'DeterministicDraw', 'DeterministicFlip', 'dihedral_mat', 'Dihedral', 'DeterministicDihedral', 'rotate_mat',
'Rotate', 'zoom_mat', 'Zoom', 'find_coeffs', 'apply_perspective', 'Warp', 'SpaceTfm', 'LightingTfm',
'Brightness', 'Contrast', 'grayscale', 'Saturation', 'rgb2hsv', 'hsv2rgb', 'HSVTfm', 'Hue',
'cutout_gaussian', 'norm_apply_denorm', 'RandomErasing', 'setup_aug_tfms', 'aug_transforms']
# Cell
from ..data.all import *
from .core import *
from .data import *
# Cell
from torch import stack, zeros_like as t0, ones_like as t1
from torch.distributions.bernoulli import Bernoulli
# Cell
class RandTransform(DisplayedTransform):
    "A transform that refreshes its state via `before_call` at each `__call__`"
do,nm,supports,split_idx = True,None,[],0
def __init__(self, p=1., nm=None, before_call=None, **kwargs):
store_attr('p')
super().__init__(**kwargs)
self.before_call = ifnone(before_call,self.before_call)
def before_call(self, b, split_idx):
"Set `self.do` based on `self.p`"
self.do = self.p==1. or random.random() < self.p
def __call__(self, b, split_idx=None, **kwargs):
self.before_call(b, split_idx=split_idx)
return super().__call__(b, split_idx=split_idx, **kwargs) if self.do else b
# Cell
def _neg_axis(x, axis):
x[...,axis] = -x[...,axis]
return x
TensorTypes = (TensorImage,TensorMask,TensorPoint,TensorBBox)
# Internal Cell
@patch
def flip_lr(x:Image.Image): return x.transpose(Image.FLIP_LEFT_RIGHT)
@patch
def flip_lr(x:TensorImageBase): return x.flip(-1)
@patch
def flip_lr(x:TensorPoint): return TensorPoint(_neg_axis(x.clone(), 0))
@patch
def flip_lr(x:TensorBBox): return TensorBBox(TensorPoint(x.view(-1,2)).flip_lr().view(-1,4))
# Cell
class FlipItem(RandTransform):
"Randomly flip with probability `p`"
def __init__(self, p=0.5): super().__init__(p=p)
def encodes(self, x:(Image.Image,*TensorTypes)): return x.flip_lr()
# Internal Cell
@patch
def dihedral(x:PILImage, k): return x if k==0 else x.transpose(k-1)
@patch
def dihedral(x:TensorImage, k):
if k in [1,3,4,7]: x = x.flip(-1)
if k in [2,4,5,7]: x = x.flip(-2)
if k in [3,5,6,7]: x = x.transpose(-1,-2)
return x
@patch
def dihedral(x:TensorPoint, k):
if k in [1,3,4,7]: x = _neg_axis(x, 0)
if k in [2,4,5,7]: x = _neg_axis(x, 1)
if k in [3,5,6,7]: x = x.flip(1)
return x
@patch
def dihedral(x:TensorBBox, k):
pnts = TensorPoint(x.view(-1,2)).dihedral(k).view(-1,2,2)
tl,br = pnts.min(dim=1)[0],pnts.max(dim=1)[0]
return TensorBBox(torch.cat([tl, br], dim=1), img_size=x.img_size)
# Cell
class DihedralItem(RandTransform):
    "Randomly apply one of the eight dihedral transformations (flips and 90-degree rotations) with probability `p`"
def before_call(self, b, split_idx):
super().before_call(b, split_idx)
self.k = random.randint(0,7)
def encodes(self, x:(Image.Image,*TensorTypes)): return x.dihedral(self.k)
# Cell
from torchvision.transforms.functional import pad as tvpad
# Cell
mk_class('PadMode', **{o:o.lower() for o in ['Zeros', 'Border', 'Reflection']},
doc="All possible padding mode as attributes to get tab-completion and typo-proofing")
# Cell
#nbdev_comment _all_ = ['PadMode']
# Internal Cell
_pad_modes = {'zeros': 'constant', 'border': 'edge', 'reflection': 'reflect'}
@patch
def _do_crop_pad(x:Image.Image, sz, tl, orig_sz,
pad_mode=PadMode.Zeros, resize_mode=Image.BILINEAR, resize_to=None):
if any(tl.ge(0)):
# At least one dim is inside the image, so needs to be cropped
c = tl.max(0)
x = x.crop((*c, *c.add(sz).min(orig_sz)))
if any(tl.lt(0)):
# At least one dim is outside the image, so needs to be padded
p = (-tl).max(0)
f = (sz-orig_sz-p).max(0)
x = tvpad(x, (*p, *f), padding_mode=_pad_modes[pad_mode])
if resize_to is not None: x = x.resize(resize_to, resize_mode)
return x
@patch
def _do_crop_pad(x:TensorPoint, sz, tl, orig_sz, pad_mode=PadMode.Zeros, resize_to=None, **kwargs):
#assert pad_mode==PadMode.Zeros,"Only zero padding is supported for `TensorPoint` and `TensorBBox`"
orig_sz,sz,tl = map(FloatTensor, (orig_sz,sz,tl))
return TensorPoint((x+1)*orig_sz/sz - tl*2/sz - 1, sz=sz if resize_to is None else resize_to)
@patch
def _do_crop_pad(x:TensorBBox, sz, tl, orig_sz, pad_mode=PadMode.Zeros, resize_to=None, **kwargs):
bbox = TensorPoint._do_crop_pad(x.view(-1,2), sz, tl, orig_sz, pad_mode, resize_to).view(-1,4)
return TensorBBox(bbox, img_size=x.img_size)
@patch
def crop_pad(x:(TensorBBox,TensorPoint,Image.Image),
sz, tl=None, orig_sz=None, pad_mode=PadMode.Zeros, resize_mode=Image.BILINEAR, resize_to=None):
if isinstance(sz,int): sz = (sz,sz)
orig_sz = fastuple(_get_sz(x) if orig_sz is None else orig_sz)
sz,tl = fastuple(sz),fastuple(((_get_sz(x)-sz)//2) if tl is None else tl)
return x._do_crop_pad(sz, tl, orig_sz=orig_sz, pad_mode=pad_mode, resize_mode=resize_mode, resize_to=resize_to)
# Cell
def _process_sz(size):
if isinstance(size,int): size=(size,size)
return fastuple(size[1],size[0])
def _get_sz(x):
if isinstance(x, tuple): x = x[0]
if not isinstance(x, Tensor): return fastuple(x.size)
return fastuple(getattr(x, 'img_size', getattr(x, 'sz', (x.shape[-1], x.shape[-2]))))
# Cell
@delegates()
class CropPad(DisplayedTransform):
"Center crop or pad an image to `size`"
order = 0
def __init__(self, size, pad_mode=PadMode.Zeros, **kwargs):
size = _process_sz(size)
store_attr()
super().__init__(**kwargs)
def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):
orig_sz = _get_sz(x)
tl = (orig_sz-self.size)//2
return x.crop_pad(self.size, tl, orig_sz=orig_sz, pad_mode=self.pad_mode)
# Cell
@delegates()
class RandomCrop(RandTransform):
"Randomly crop an image to `size`"
split_idx,order = None,1
def __init__(self, size, **kwargs):
size = _process_sz(size)
store_attr()
super().__init__(**kwargs)
def before_call(self, b, split_idx):
self.orig_sz = _get_sz(b)
if split_idx: self.tl = (self.orig_sz-self.size)//2
else:
wd = self.orig_sz[0] - self.size[0]
hd = self.orig_sz[1] - self.size[1]
w_rand = (wd, -1) if wd < 0 else (0, wd)
h_rand = (hd, -1) if hd < 0 else (0, hd)
self.tl = fastuple(random.randint(*w_rand), random.randint(*h_rand))
def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):
return x.crop_pad(self.size, self.tl, orig_sz=self.orig_sz)
# Cell
class OldRandomCrop(CropPad):
"Randomly crop an image to `size`"
def before_call(self, b, split_idx):
super().before_call(b, split_idx)
w,h = self.orig_sz
if not split_idx: self.tl = (random.randint(0,w-self.cp_size[0]), random.randint(0,h-self.cp_size[1]))
# Cell
mk_class('ResizeMethod', **{o:o.lower() for o in ['Squish', 'Crop', 'Pad']},
doc="All possible resize method as attributes to get tab-completion and typo-proofing")
# Cell
#nbdev_comment _all_ = ['ResizeMethod']
# Cell
@delegates()
class Resize(RandTransform):
    "Resize image to `size` using `method`"
    split_idx,mode,mode_mask,order = None,Image.BILINEAR,Image.NEAREST,1
def __init__(self, size, method=ResizeMethod.Crop, pad_mode=PadMode.Reflection,
resamples=(Image.BILINEAR, Image.NEAREST), **kwargs):
size = _process_sz(size)
store_attr()
super().__init__(**kwargs)
self.mode,self.mode_mask = resamples
def before_call(self, b, split_idx):
if self.method==ResizeMethod.Squish: return
self.pcts = (0.5,0.5) if split_idx else (random.random(),random.random())
def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):
orig_sz = _get_sz(x)
if self.method==ResizeMethod.Squish:
return x.crop_pad(orig_sz, fastuple(0,0), orig_sz=orig_sz, pad_mode=self.pad_mode,
resize_mode=self.mode_mask if isinstance(x,PILMask) else self.mode, resize_to=self.size)
w,h = orig_sz
op = (operator.lt,operator.gt)[self.method==ResizeMethod.Pad]
m = w/self.size[0] if op(w/self.size[0],h/self.size[1]) else h/self.size[1]
cp_sz = (int(m*self.size[0]),int(m*self.size[1]))
tl = fastuple(int(self.pcts[0]*(w-cp_sz[0])), int(self.pcts[1]*(h-cp_sz[1])))
return x.crop_pad(cp_sz, tl, orig_sz=orig_sz, pad_mode=self.pad_mode,
resize_mode=self.mode_mask if isinstance(x,PILMask) else self.mode, resize_to=self.size)
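# A minimal usage sketch for `Resize` (as an item transform it handles PIL images, masks,
# points and bboxes, so it is typically passed as `item_tfms` when building dataloaders):
def _demo_resize(img):
    squished = Resize(224, method=ResizeMethod.Squish)(img)
    cropped  = Resize(224, method=ResizeMethod.Crop)(img)    # random crop while training, center crop on the validation set
    padded   = Resize(224, method=ResizeMethod.Pad, pad_mode=PadMode.Zeros)(img)
    return squished, cropped, padded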
# Cell
@delegates()
class RandomResizedCrop(RandTransform):
    "Picks a random scaled crop of an image and resizes it to `size`"
split_idx,order = None,1
def __init__(self, size, min_scale=0.08, ratio=(3/4, 4/3), resamples=(Image.BILINEAR, Image.NEAREST),
val_xtra=0.14, **kwargs):
size = _process_sz(size)
store_attr()
super().__init__(**kwargs)
self.mode,self.mode_mask = resamples
def before_call(self, b, split_idx):
w,h = self.orig_sz = _get_sz(b)
if split_idx:
xtra = math.ceil(max(*self.size[:2])*self.val_xtra/8)*8
self.final_size = (self.size[0]+xtra, self.size[1]+xtra)
self.tl,self.cp_size = (0,0),self.orig_sz
return
self.final_size = self.size
for attempt in range(10):
area = random.uniform(self.min_scale,1.) * w * h
ratio = math.exp(random.uniform(math.log(self.ratio[0]), math.log(self.ratio[1])))
nw = int(round(math.sqrt(area * ratio)))
nh = int(round(math.sqrt(area / ratio)))
if nw <= w and nh <= h:
self.cp_size = (nw,nh)
self.tl = random.randint(0,w-nw), random.randint(0,h - nh)
return
if w/h < self.ratio[0]: self.cp_size = (w, int(w/self.ratio[0]))
elif w/h > self.ratio[1]: self.cp_size = (int(h*self.ratio[1]), h)
else: self.cp_size = (w, h)
self.tl = ((w-self.cp_size[0])//2, (h-self.cp_size[1])//2)
def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):
res = x.crop_pad(self.cp_size, self.tl, orig_sz=self.orig_sz,
resize_mode=self.mode_mask if isinstance(x,PILMask) else self.mode, resize_to=self.final_size)
if self.final_size != self.size: res = res.crop_pad(self.size) #Validation set: one final center crop
return res
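# A minimal usage sketch for `RandomResizedCrop` (at train time a random area/aspect crop is
# resized to `size`; on the validation set a slightly larger resize plus one center crop is used):
def _demo_random_resized_crop(img):
    rrc = RandomResizedCrop(224, min_scale=0.35)
    return rrc(img)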
# Cell
class RatioResize(DisplayedTransform):
'Resizes the biggest dimension of an image to `max_sz` maintaining the aspect ratio'
order = 1
def __init__(self, max_sz, resamples=(Image.BILINEAR, Image.NEAREST), **kwargs):
store_attr()
super().__init__(**kwargs)
def encodes(self, x:(Image.Image,TensorBBox,TensorPoint)):
w,h = _get_sz(x)
if w >= h: nw,nh = self.max_sz,h*self.max_sz/w
else: nw,nh = w*self.max_sz/h,self.max_sz
return Resize(size=(int(nh),int(nw)), resamples=self.resamples)(x)
# Cell
def _init_mat(x):
mat = torch.eye(3, device=x.device).float()
return mat.unsqueeze(0).expand(x.size(0), 3, 3).contiguous()
# Cell
def _grid_sample(x, coords, mode='bilinear', padding_mode='reflection', align_corners=None):
"Resample pixels in `coords` from `x` by `mode`, with `padding_mode` in ('reflection','border','zeros')."
#coords = coords.permute(0, 3, 1, 2).contiguous().permute(0, 2, 3, 1) # optimize layout for grid_sample
if mode=='bilinear': # hack to get smoother downwards resampling
mn,mx = coords.min(),coords.max()
# max amount we're affine zooming by (>1 means zooming in)
z = 1/(mx-mn).item()*2
# amount we're resizing by, with 100% extra margin
d = min(x.shape[-2]/coords.shape[-2], x.shape[-1]/coords.shape[-1])/2
# If we're resizing up by >200%, and we're zooming less than that, interpolate first
if d>1 and d>z:
# Pytorch > v1.4.x needs an extra argument when calling nn.functional.interpolate to preserve previous behaviour
if (int(torch.__version__[0:4].replace(".", "")) > 14):
x = F.interpolate(x, scale_factor=1/d, mode='area', recompute_scale_factor=True)
else:
x = F.interpolate(x, scale_factor=1/d, mode='area')
return F.grid_sample(x, coords, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
# Cell
def affine_grid(theta, size, align_corners=None):
return TensorFlowField(F.affine_grid(theta, size, align_corners=align_corners))
# Internal Cell
@patch
def affine_coord(x: TensorImage, mat=None, coord_tfm=None, sz=None, mode='bilinear',
pad_mode=PadMode.Reflection, align_corners=True):
if mat is None and coord_tfm is None and sz is None: return x
size = tuple(x.shape[-2:]) if sz is None else (sz,sz) if isinstance(sz,int) else tuple(sz)
if mat is None: mat = _init_mat(x)[:,:2]
coords = affine_grid(mat, x.shape[:2] + size, align_corners=align_corners)
if coord_tfm is not None: coords = coord_tfm(coords)
return TensorImage(_grid_sample(x, coords, mode=mode, padding_mode=pad_mode, align_corners=align_corners))
@patch
def affine_coord(x: TensorMask, mat=None, coord_tfm=None, sz=None, mode='nearest',
pad_mode=PadMode.Reflection, align_corners=True):
add_dim = (x.ndim==3)
if add_dim: x = x[:,None]
res = TensorImage.affine_coord(x.float(), mat, coord_tfm, sz, mode, pad_mode, align_corners).long()
if add_dim: res = res[:,0]
return TensorMask(res)
@patch
def affine_coord(x: TensorPoint, mat=None, coord_tfm=None, sz=None, mode='nearest',
pad_mode=PadMode.Zeros, align_corners=True):
#assert pad_mode==PadMode.Zeros, "Only zero padding is supported for `TensorPoint` and `TensorBBox`"
if sz is None: sz = getattr(x, "img_size", None)
if coord_tfm is not None: x = coord_tfm(x, invert=True)
if mat is not None:
mat = TensorPoint(mat)
x = (x - mat[:,:,2].unsqueeze(1)) @ torch.inverse(mat[:,:,:2].transpose(1,2))
return TensorPoint(x, sz=sz)
@patch
def affine_coord(x: TensorBBox, mat=None, coord_tfm=None, sz=None, mode='nearest',
pad_mode=PadMode.Zeros, align_corners=True):
if mat is None and coord_tfm is None: return x
if sz is None: sz = getattr(x, "img_size", None)
bs,n = x.shape[:2]
pnts = stack([x[...,:2], stack([x[...,0],x[...,3]],dim=2),
stack([x[...,2],x[...,1]],dim=2), x[...,2:]], dim=2)
pnts = TensorPoint(pnts.view(bs, 4*n, 2), img_size=sz).affine_coord(mat, coord_tfm, sz, mode, pad_mode)
pnts = pnts.view(bs, n, 4, 2)
tl,dr = pnts.min(dim=2)[0],pnts.max(dim=2)[0]
return TensorBBox(torch.cat([tl, dr], dim=2), img_size=sz)
# Cell
def _prepare_mat(x, mat):
h,w = getattr(x, 'img_size', x.shape[-2:])
mat[:,0,1] *= h/w
mat[:,1,0] *= w/h
return mat[:,:2]
# Cell
class AffineCoordTfm(RandTransform):
"Combine and apply affine and coord transforms"
order,split_idx = 30,None
def __init__(self, aff_fs=None, coord_fs=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection,
mode_mask='nearest', align_corners=None, **kwargs):
store_attr(but=['aff_fs','coord_fs'])
super().__init__(**kwargs)
self.aff_fs,self.coord_fs = L(aff_fs),L(coord_fs)
self.cp_size = None if size is None else (size,size) if isinstance(size, int) else tuple(size)
def before_call(self, b, split_idx):
while isinstance(b, tuple): b = b[0]
self.split_idx = split_idx
self.do,self.mat = True,self._get_affine_mat(b)
for t in self.coord_fs: t.before_call(b)
def compose(self, tfm):
"Compose `self` with another `AffineCoordTfm` to only do the interpolation step once"
# TODO: keep `name` up to date with the combination
# TODO: have option to only show a subset of the attrs, e.g. for `Flip`
self.aff_fs += tfm.aff_fs
self.coord_fs += tfm.coord_fs
def _get_affine_mat(self, x):
aff_m = _init_mat(x)
if self.split_idx: return _prepare_mat(x, aff_m)
ms = [f(x) for f in self.aff_fs]
ms = [m for m in ms if m is not None]
for m in ms: aff_m = aff_m @ m
return _prepare_mat(x, aff_m)
def _encode(self, x, mode, reverse=False):
coord_func = None if len(self.coord_fs)==0 or self.split_idx else partial(compose_tfms, tfms=self.coord_fs, reverse=reverse)
return x.affine_coord(self.mat, coord_func, sz=self.size, mode=mode, pad_mode=self.pad_mode, align_corners=self.align_corners)
def encodes(self, x:TensorImage): return self._encode(x, self.mode)
def encodes(self, x:TensorMask): return self._encode(x, self.mode_mask)
def encodes(self, x:(TensorPoint, TensorBBox)): return self._encode(x, self.mode, reverse=True)
# Cell
class RandomResizedCropGPU(RandTransform):
    "Picks a random scaled crop of an image and resizes it to `size`"
split_idx,order = None,30
def __init__(self, size, min_scale=0.08, ratio=(3/4, 4/3), mode='bilinear', valid_scale=1., **kwargs):
if isinstance(size, int): size = (size,size)
store_attr()
super().__init__(**kwargs)
def before_call(self, b, split_idx):
self.do = True
h,w = fastuple((b[0] if isinstance(b, tuple) else b).shape[-2:])
for attempt in range(10):
if split_idx: break
area = random.uniform(self.min_scale,1.) * w * h
ratio = math.exp(random.uniform(math.log(self.ratio[0]), math.log(self.ratio[1])))
nw = int(round(math.sqrt(area * ratio)))
nh = int(round(math.sqrt(area / ratio)))
if nw <= w and nh <= h:
self.cp_size = (nh,nw)
self.tl = random.randint(0,h - nh),random.randint(0,w-nw)
return
if w/h < self.ratio[0]: self.cp_size = (int(w/self.ratio[0]), w)
elif w/h > self.ratio[1]: self.cp_size = (h, int(h*self.ratio[1]))
else: self.cp_size = (h, w)
if split_idx: self.cp_size = (int(self.cp_size[0]*self.valid_scale), int(self.cp_size[1]*self.valid_scale))
self.tl = ((h-self.cp_size[0])//2,(w-self.cp_size[1])//2)
def encodes(self, x:TensorImage):
x = x[...,self.tl[0]:self.tl[0]+self.cp_size[0], self.tl[1]:self.tl[1]+self.cp_size[1]]
return TensorImage(x).affine_coord(sz=self.size, mode=self.mode)
# Cell
def mask_tensor(x, p=0.5, neutral=0., batch=False):
"Mask elements of `x` with `neutral` with probability `1-p`"
if p==1.: return x
if batch: return x if random.random() < p else x.new_zeros(*x.size()) + neutral
if neutral != 0: x.add_(-neutral)
mask = x.new_empty(*x.size()).bernoulli_(p)
x.mul_(mask)
return x.add_(neutral) if neutral != 0 else x
# Cell
def _draw_mask(x, def_draw, draw=None, p=0.5, neutral=0., batch=False):
    "Create a mask tensor from `x` using `def_draw` or `draw`, with entries set to `neutral` with probability `1-p`"
if draw is None: draw=def_draw
if callable(draw): res=draw(x)
elif is_listy(draw):
assert len(draw)>=x.size(0)
res = tensor(draw[:x.size(0)], dtype=x.dtype, device=x.device)
else: res = x.new_zeros(x.size(0)) + draw
return TensorBase(mask_tensor(res, p=p, neutral=neutral, batch=batch))
# Cell
def affine_mat(*ms):
    "Restructure the 6 (batched) scalars in `ms` into an affine matrix with 0,0,1 in the last row"
return stack([stack([ms[0], ms[1], ms[2]], dim=1),
stack([ms[3], ms[4], ms[5]], dim=1),
stack([t0(ms[0]), t0(ms[0]), t1(ms[0])], dim=1)], dim=1)
# Cell
def flip_mat(x, p=0.5, draw=None, batch=False):
"Return a random flip matrix"
def _def_draw(x): return x.new_ones(x.size(0))
mask = x.new_ones(x.size(0)) - 2*_draw_mask(x, _def_draw, draw=draw, p=p, batch=batch)
return affine_mat(mask, t0(mask), t0(mask),
t0(mask), t1(mask), t0(mask))
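# A small shape sketch (using a dummy batch): `flip_mat` returns one 3x3 affine matrix per item,
# with the top-left entry set to -1 for the items selected by `p`.
def _demo_flip_mat():
    x = torch.zeros(4, 3, 8, 8)
    return flip_mat(x, p=0.5).shape   # torch.Size([4, 3, 3])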
# Cell
def _get_default(x, mode=None, pad_mode=None):
    if mode is None: mode='nearest' if isinstance(x, TensorMask) else 'bilinear'
if pad_mode is None: pad_mode=PadMode.Zeros if isinstance(x, (TensorPoint, TensorBBox)) else PadMode.Reflection
x0 = x[0] if isinstance(x, tuple) else x
return x0,mode,pad_mode
# Internal Cell
@patch
def flip_batch(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), p=0.5, draw=None, size=None,
mode=None, pad_mode=None, align_corners=True, batch=False):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
mat=flip_mat(x0, p=p, draw=draw, batch=batch)
return x.affine_coord(mat=mat[:,:2], sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class Flip(AffineCoordTfm):
"Randomly flip a batch of images with a probability `p`"
def __init__(self, p=0.5, draw=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection, align_corners=True, batch=False):
aff_fs = partial(flip_mat, p=p, draw=draw, batch=batch)
super().__init__(aff_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners, p=p)
# Cell
class DeterministicDraw():
def __init__(self, vals): self.vals,self.count = vals,-1
def __call__(self, x):
self.count += 1
return x.new_zeros(x.size(0)) + self.vals[self.count%len(self.vals)]
# Cell
class DeterministicFlip(Flip):
"Flip the batch every other call"
def __init__(self, size=None, mode='bilinear', pad_mode=PadMode.Reflection, align_corners=True, **kwargs):
super().__init__(p=1., draw=DeterministicDraw([0,1]), mode=mode, pad_mode=pad_mode, align_corners=align_corners, **kwargs)
# Cell
def dihedral_mat(x, p=0.5, draw=None, batch=False):
"Return a random dihedral matrix"
def _def_draw(x): return torch.randint(0,8, (x.size(0),), device=x.device)
def _def_draw_b(x): return random.randint(0,7) + x.new_zeros((x.size(0),)).long()
idx = _draw_mask(x, _def_draw_b if batch else _def_draw, draw=draw, p=p, batch=batch).long()
xs = tensor([1,-1,1,-1,-1,1,1,-1], device=x.device).gather(0, idx)
ys = tensor([1,1,-1,1,-1,-1,1,-1], device=x.device).gather(0, idx)
m0 = tensor([1,1,1,0,1,0,0,0], device=x.device).gather(0, idx)
m1 = tensor([0,0,0,1,0,1,1,1], device=x.device).gather(0, idx)
return affine_mat(xs*m0, xs*m1, t0(xs),
ys*m1, ys*m0, t0(xs)).float()
# Internal Cell
@patch
def dihedral_batch(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), p=0.5, draw=None, size=None,
mode=None, pad_mode=None, batch=False, align_corners=True):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
mat = _prepare_mat(x, dihedral_mat(x0, p=p, draw=draw, batch=batch))
return x.affine_coord(mat=mat, sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class Dihedral(AffineCoordTfm):
"Apply a random dihedral transformation to a batch of images with a probability `p`"
def __init__(self, p=0.5, draw=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection, align_corners=None, batch=False):
f = partial(dihedral_mat, p=p, draw=draw, batch=batch)
super().__init__(aff_fs=f, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class DeterministicDihedral(Dihedral):
    "Cycle deterministically through the eight dihedral transformations, one per call"
    def __init__(self, size=None, mode='bilinear', pad_mode=PadMode.Reflection, align_corners=None):
        super().__init__(p=1., draw=DeterministicDraw(list(range(8))), pad_mode=pad_mode, align_corners=align_corners)
# Cell
def rotate_mat(x, max_deg=10, p=0.5, draw=None, batch=False):
"Return a random rotation matrix with `max_deg` and `p`"
def _def_draw(x): return x.new_empty(x.size(0)).uniform_(-max_deg, max_deg)
def _def_draw_b(x): return x.new_zeros(x.size(0)) + random.uniform(-max_deg, max_deg)
thetas = _draw_mask(x, _def_draw_b if batch else _def_draw, draw=draw, p=p, batch=batch) * math.pi/180
return affine_mat(thetas.cos(), thetas.sin(), t0(thetas),
-thetas.sin(), thetas.cos(), t0(thetas))
# Internal Cell
@patch
@delegates(rotate_mat)
def rotate(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), size=None, mode=None, pad_mode=None, align_corners=True, **kwargs):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
mat = _prepare_mat(x, rotate_mat(x0, **kwargs))
return x.affine_coord(mat=mat, sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class Rotate(AffineCoordTfm):
"Apply a random rotation of at most `max_deg` with probability `p` to a batch of images"
def __init__(self, max_deg=10, p=0.5, draw=None, size=None, mode='bilinear', pad_mode=PadMode.Reflection,
align_corners=True, batch=False):
aff_fs = partial(rotate_mat, max_deg=max_deg, p=p, draw=draw, batch=batch)
super().__init__(aff_fs=aff_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
def zoom_mat(x, min_zoom=1., max_zoom=1.1, p=0.5, draw=None, draw_x=None, draw_y=None, batch=False):
"Return a random zoom matrix with `max_zoom` and `p`"
def _def_draw(x): return x.new_empty(x.size(0)).uniform_(min_zoom, max_zoom)
def _def_draw_b(x): return x.new_zeros(x.size(0)) + random.uniform(min_zoom, max_zoom)
def _def_draw_ctr(x): return x.new_empty(x.size(0)).uniform_(0,1)
def _def_draw_ctr_b(x): return x.new_zeros(x.size(0)) + random.uniform(0,1)
assert(min_zoom<=max_zoom)
s = 1/_draw_mask(x, _def_draw_b if batch else _def_draw, draw=draw, p=p, neutral=1., batch=batch)
def_draw_c = _def_draw_ctr_b if batch else _def_draw_ctr
col_pct = _draw_mask(x, def_draw_c, draw=draw_x, p=1., batch=batch)
row_pct = _draw_mask(x, def_draw_c, draw=draw_y, p=1., batch=batch)
col_c = (1-s) * (2*col_pct - 1)
row_c = (1-s) * (2*row_pct - 1)
return affine_mat(s, t0(s), col_c,
t0(s), s, row_c)
# Internal Cell
@patch
@delegates(zoom_mat)
def zoom(x: (TensorImage,TensorMask,TensorPoint,TensorBBox), size=None, mode='bilinear', pad_mode=PadMode.Reflection,
align_corners=True, **kwargs):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
return x.affine_coord(mat=zoom_mat(x0, **kwargs)[:,:2], sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class Zoom(AffineCoordTfm):
"Apply a random zoom of at most `max_zoom` with probability `p` to a batch of images"
def __init__(self,min_zoom=1., max_zoom=1.1, p=0.5, draw=None, draw_x=None, draw_y=None, size=None, mode='bilinear',
pad_mode=PadMode.Reflection, batch=False, align_corners=True):
aff_fs = partial(zoom_mat, min_zoom=min_zoom, max_zoom=max_zoom, p=p, draw=draw, draw_x=draw_x, draw_y=draw_y, batch=batch)
super().__init__(aff_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
def find_coeffs(p1, p2):
"Find coefficients for warp tfm from `p1` to `p2`"
m = []
p = p1[:,0,0]
#The equations we'll need to solve.
for i in range(p1.shape[1]):
m.append(stack([p2[:,i,0], p2[:,i,1], t1(p), t0(p), t0(p), t0(p), -p1[:,i,0]*p2[:,i,0], -p1[:,i,0]*p2[:,i,1]]))
m.append(stack([t0(p), t0(p), t0(p), p2[:,i,0], p2[:,i,1], t1(p), -p1[:,i,1]*p2[:,i,0], -p1[:,i,1]*p2[:,i,1]]))
#The 8 scalars we seek are solution of AX = B
A = stack(m).permute(2, 0, 1)
B = p1.view(p1.shape[0], 8, 1)
return torch.solve(B,A)[0]
# Cell
def apply_perspective(coords, coeffs):
    "Apply perspective transform on `coords` with `coeffs`"
sz = coords.shape
coords = coords.view(sz[0], -1, 2)
coeffs = torch.cat([coeffs, t1(coeffs[:,:1])], dim=1).view(coeffs.shape[0], 3,3)
coords1 = coords @ coeffs[...,:2].transpose(1,2) + coeffs[...,2].unsqueeze(1)
if (coords1[...,2]==0.).any(): return coords[...,:2].view(*sz)
coords = coords1/coords1[...,2].unsqueeze(-1)
return coords[...,:2].view(*sz)
# Cell
class _WarpCoord():
def __init__(self, magnitude=0.2, p=0.5, draw_x=None, draw_y=None, batch=False):
store_attr()
self.coeffs = None
def _def_draw(self, x):
if not self.batch: return x.new_empty(x.size(0)).uniform_(-self.magnitude, self.magnitude)
return x.new_zeros(x.size(0)) + random.uniform(-self.magnitude, self.magnitude)
def before_call(self, x):
x_t = _draw_mask(x, self._def_draw, self.draw_x, p=self.p, batch=self.batch)
y_t = _draw_mask(x, self._def_draw, self.draw_y, p=self.p, batch=self.batch)
orig_pts = torch.tensor([[-1,-1], [-1,1], [1,-1], [1,1]], dtype=x.dtype, device=x.device)
self.orig_pts = orig_pts.unsqueeze(0).expand(x.size(0),4,2)
targ_pts = stack([stack([-1-y_t, -1-x_t]), stack([-1+y_t, 1+x_t]),
stack([ 1+y_t, -1+x_t]), stack([ 1-y_t, 1-x_t])])
self.targ_pts = targ_pts.permute(2,0,1)
def __call__(self, x, invert=False):
coeffs = find_coeffs(self.targ_pts, self.orig_pts) if invert else find_coeffs(self.orig_pts, self.targ_pts)
return apply_perspective(x, coeffs)
# Internal Cell
@patch
@delegates(_WarpCoord.__init__)
def warp(x:(TensorImage,TensorMask,TensorPoint,TensorBBox), size=None, mode='bilinear',
pad_mode=PadMode.Reflection, align_corners=True, **kwargs):
x0,mode,pad_mode = _get_default(x, mode, pad_mode)
coord_tfm = _WarpCoord(**kwargs)
coord_tfm.before_call(x0)
return x.affine_coord(coord_tfm=coord_tfm, sz=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# Cell
class Warp(AffineCoordTfm):
    "Apply perspective warping with `magnitude` and `p` on a batch of images"
def __init__(self, magnitude=0.2, p=0.5, draw_x=None, draw_y=None,size=None, mode='bilinear',
pad_mode=PadMode.Reflection, batch=False, align_corners=True):
store_attr()
coord_fs = _WarpCoord(magnitude=magnitude, p=p, draw_x=draw_x, draw_y=draw_y, batch=batch)
super().__init__(coord_fs=coord_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners )
# Cell
@patch
def lighting(x: TensorImage, func): return TensorImage(torch.sigmoid(func(logit(x))))
# Cell
class SpaceTfm(RandTransform):
    "Apply `fs` in the space defined by `space_fn`"
order = 40
def __init__(self, fs, space_fn, **kwargs):
super().__init__(**kwargs)
self.space_fn=space_fn
self.fs=L(fs)
def before_call(self, b, split_idx):
self.do = True
while isinstance(b, tuple): b = b[0]
for t in self.fs: t.before_call(b)
    def compose(self, tfm):
        "Compose `self` with another `SpaceTfm`"
self.fs += tfm.fs
def encodes(self,x:TensorImage): return self.space_fn(x,partial(compose_tfms, tfms=self.fs))
# Cell
class LightingTfm(SpaceTfm):
"Apply `fs` to the logits"
order = 40
def __init__(self, fs, **kwargs):
super().__init__(fs, TensorImage.lighting, **kwargs)
# Cell
class _BrightnessLogit():
def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: return x.new_empty(x.size(0)).uniform_(0.5*(1-self.max_lighting), 0.5*(1+self.max_lighting))
return x.new_zeros(x.size(0)) + random.uniform(0.5*(1-self.max_lighting), 0.5*(1+self.max_lighting))
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=0.5, batch=self.batch)
def __call__(self, x): return x.add_(logit(self.change[:,None,None,None]))
# Internal Cell
@patch
@delegates(_BrightnessLogit.__init__)
def brightness(x: TensorImage, **kwargs):
func = _BrightnessLogit(**kwargs)
func.before_call(x)
return x.lighting(func)
# Cell
class Brightness(LightingTfm):
    "Apply change in brightness of `max_lighting` to batch of images with probability `p`."
    def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False):
        store_attr()
        super().__init__(_BrightnessLogit(max_lighting, p, draw, batch))
# Cell
class _ContrastLogit():
def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
return torch.exp(res)
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=1., batch=self.batch)
def __call__(self, x): return x.mul_(self.change[:,None,None,None])
# Internal Cell
@patch
@delegates(_ContrastLogit.__init__)
def contrast(x: TensorImage, **kwargs):
func = _ContrastLogit(**kwargs)
func.before_call(x)
return x.lighting(func)
# Cell
class Contrast(LightingTfm):
"Apply change in contrast of `max_lighting` to batch of images with probability `p`."
def __init__(self,max_lighting=0.2, p=0.75, draw=None, batch=False):
store_attr()
super().__init__(_ContrastLogit(max_lighting, p, draw, batch))
# Cell
def grayscale(x):
"Tensor to grayscale tensor. Uses the ITU-R 601-2 luma transform. "
return (x*torch.tensor([0.2989,0.5870,0.1140],device=x.device)[...,None,None]).sum(1)[:,None]
# Cell
class _SaturationLogit():
def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
return torch.exp(res)
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=1., batch=self.batch)
def __call__(self, x):
#interpolate between grayscale and original in-place
gs = grayscale(x)
gs.mul_(1-self.change[:,None,None,None])
x.mul_(self.change[:,None,None,None])
return x.add_(gs)
# Internal Cell
@patch
@delegates(_SaturationLogit.__init__)
def saturation(x: TensorImage, **kwargs):
func = _SaturationLogit(**kwargs)
func.before_call(x)
return x.lighting(func)
# Cell
class Saturation(LightingTfm):
"Apply change in saturation of `max_lighting` to batch of images with probability `p`."
# Ref: https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.functional.adjust_saturation
def __init__(self,max_lighting=0.2, p=0.75, draw=None, batch=False):
store_attr()
super().__init__(_SaturationLogit(max_lighting, p, draw, batch))
# Cell
def rgb2hsv(img):
    "Converts an RGB image to an HSV image. Note: will not work on logit space images."
r, g, b = img.unbind(1)
# temp commented out due to https://github.com/pytorch/pytorch/issues/47069
# maxc = torch.max(img, dim=1).values
# minc = torch.min(img, dim=1).values
maxc = torch.max(img, dim=1)[0]
minc = torch.min(img, dim=1)[0]
eqc = maxc == minc
cr = maxc - minc
s = cr / torch.where(eqc, maxc.new_ones(()), maxc)
cr_divisor = torch.where(eqc, maxc.new_ones(()), cr)
rc = (maxc - r) / cr_divisor
gc = (maxc - g) / cr_divisor
bc = (maxc - b) / cr_divisor
hr = (maxc == r) * (bc - gc)
hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
h = (hr + hg + hb)
h = torch.fmod((h / 6.0 + 1.0), 1.0)
return torch.stack((h, s, maxc),dim=1)
# Cell
def hsv2rgb(img):
    "Converts an HSV image to an RGB image."
h, s, v = img.unbind(1)
i = torch.floor(h * 6.0)
f = (h * 6.0) - i
i = i.to(dtype=torch.int32)
p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
i = i % 6
mask = i[:,None] == torch.arange(6,device=i.device)[:, None, None][None]
a1 = torch.stack((v, q, p, p, t, v),dim=1)
a2 = torch.stack((t, v, v, q, p, p),dim=1)
a3 = torch.stack((p, p, t, v, v, q),dim=1)
a4 = torch.stack((a1, a2, a3),dim=1)
return torch.einsum("nijk, nxijk -> nxjk", mask.to(dtype=img.dtype), a4)
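# A round-trip sketch (assuming float inputs in [0,1] with shape (bs, 3, h, w)): converting to
# HSV and back should reproduce the input up to small numeric error.
def _demo_hsv_roundtrip():
    x = torch.rand(2, 3, 8, 8)
    return (hsv2rgb(rgb2hsv(x)) - x).abs().max()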
# Internal Cell
@patch
def hsv(x: TensorImage, func): return TensorImage(hsv2rgb(func(rgb2hsv(x))))
# Cell
class HSVTfm(SpaceTfm):
"Apply `fs` to the images in HSV space"
def __init__(self, fs, **kwargs):
super().__init__(fs, TensorImage.hsv, **kwargs)
# Cell
class _Hue():
def __init__(self, max_hue=0.1, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_hue), -math.log(1-self.max_hue))
else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_hue), -math.log(1-self.max_hue))
return torch.exp(res)
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=0., batch=self.batch)
def __call__(self, x):
h,s,v = x.unbind(1)
h += self.change[:,None,None]
h = h % 1.0
return x.set_(torch.stack((h, s, v),dim=1))
# Internal Cell
@patch
@delegates(_Hue.__init__)
def hue(x: TensorImage, **kwargs):
func = _Hue(**kwargs)
func.before_call(x)
return TensorImage(x.hsv(func))
# Cell
class Hue(HSVTfm):
"Apply change in hue of `max_hue` to batch of images with probability `p`."
# Ref: https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.functional.adjust_hue
def __init__(self,max_hue=0.1, p=0.75, draw=None, batch=False):
super().__init__(_Hue(max_hue, p, draw, batch))
# Cell
def cutout_gaussian(x, areas):
"Replace all `areas` in `x` with N(0,1) noise"
chan,img_h,img_w = x.shape[-3:]
for rl,rh,cl,ch in areas: x[..., rl:rh, cl:ch].normal_()
return x
# Cell
def norm_apply_denorm(x, f, nrm):
"Normalize `x` with `nrm`, then apply `f`, then denormalize"
y = f(nrm(x.clone()))
return nrm.decode(y).clamp(0,1)
# Cell
def _slice(area, sz):
bound = int(round(math.sqrt(area)))
loc = random.randint(0, max(sz-bound, 0))
return loc,loc+bound
# Cell
class RandomErasing(RandTransform):
    "Randomly selects a rectangular region in an image and randomizes its pixels."
order = 100 # After Normalize
def __init__(self, p=0.5, sl=0., sh=0.3, min_aspect=0.3, max_count=1):
store_attr()
super().__init__(p=p)
self.log_ratio = (math.log(min_aspect), math.log(1/min_aspect))
def _bounds(self, area, img_h, img_w):
r_area = random.uniform(self.sl,self.sh) * area
aspect = math.exp(random.uniform(*self.log_ratio))
return _slice(r_area*aspect, img_h) + _slice(r_area/aspect, img_w)
def encodes(self,x:TensorImage):
count = random.randint(1, self.max_count)
_,img_h,img_w = x.shape[-3:]
area = img_h*img_w/count
areas = [self._bounds(area, img_h, img_w) for _ in range(count)]
return cutout_gaussian(x, areas)
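# A minimal usage sketch for `RandomErasing` (it runs after `Normalize`, order=100, so the
# random tensor below stands in for an already-normalized batch):
def _demo_random_erasing():
    xb = TensorImage(torch.randn(4, 3, 64, 64))
    return RandomErasing(p=1., max_count=2)(xb)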
# Cell
def _compose_same_tfms(tfms):
tfms = L(tfms)
if len(tfms) == 0: return None
res = tfms[0]
for tfm in tfms[1:]: res.compose(tfm)
return res
# Cell
def setup_aug_tfms(tfms):
    "Go through `tfms` and combine together the affine/coord and lighting transforms"
aff_tfms = [tfm for tfm in tfms if isinstance(tfm, AffineCoordTfm)]
lig_tfms = [tfm for tfm in tfms if isinstance(tfm, LightingTfm)]
others = [tfm for tfm in tfms if tfm not in aff_tfms+lig_tfms]
lig_tfm = _compose_same_tfms(lig_tfms)
aff_tfm = _compose_same_tfms(aff_tfms)
res = [aff_tfm] if aff_tfm is not None else []
if lig_tfm is not None: res.append(lig_tfm)
return res + others
# Cell
def aug_transforms(mult=1.0, do_flip=True, flip_vert=False, max_rotate=10., min_zoom=1., max_zoom=1.1,
max_lighting=0.2, max_warp=0.2, p_affine=0.75, p_lighting=0.75, xtra_tfms=None, size=None,
mode='bilinear', pad_mode=PadMode.Reflection, align_corners=True, batch=False, min_scale=1.):
"Utility func to easily create a list of flip, rotate, zoom, warp, lighting transforms."
res,tkw = [],dict(size=size if min_scale==1. else None, mode=mode, pad_mode=pad_mode, batch=batch, align_corners=align_corners)
max_rotate,max_lighting,max_warp = array([max_rotate,max_lighting,max_warp])*mult
if do_flip: res.append(Dihedral(p=0.5, **tkw) if flip_vert else Flip(p=0.5, **tkw))
if max_warp: res.append(Warp(magnitude=max_warp, p=p_affine, **tkw))
if max_rotate: res.append(Rotate(max_deg=max_rotate, p=p_affine, **tkw))
if min_zoom<1 or max_zoom>1: res.append(Zoom(min_zoom=min_zoom, max_zoom=max_zoom, p=p_affine, **tkw))
if max_lighting:
res.append(Brightness(max_lighting=max_lighting, p=p_lighting, batch=batch))
res.append(Contrast(max_lighting=max_lighting, p=p_lighting, batch=batch))
if min_scale!=1.: xtra_tfms = RandomResizedCropGPU(size, min_scale=min_scale, ratio=(1,1)) + L(xtra_tfms)
return setup_aug_tfms(res + L(xtra_tfms))
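# A minimal usage sketch for `aug_transforms` (the returned transforms run on batches, so they
# are passed as `batch_tfms`, usually together with `Normalize`):
def _demo_aug_transforms(path):
    return ImageDataLoaders.from_folder(
        path, valid_pct=0.2, item_tfms=Resize(460),
        batch_tfms=[*aug_transforms(size=224, min_scale=0.75), Normalize.from_stats(*imagenet_stats)])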
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/augment.py
|
augment.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/07_vision.core.ipynb (unless otherwise specified).
__all__ = ['Image', 'ToTensor', 'imagenet_stats', 'cifar_stats', 'mnist_stats', 'n_px', 'shape', 'aspect', 'to_image',
'load_image', 'image2tensor', 'PILBase', 'PILImage', 'PILImageBW', 'PILMask', 'OpenMask', 'AddMaskCodes',
'TensorPoint', 'TensorPointCreate', 'get_annotations', 'TensorBBox', 'LabeledBBox', 'encodes', 'encodes',
'PointScaler', 'BBoxLabeler', 'decodes', 'encodes', 'decodes']
# Cell
from ..torch_basics import *
from ..data.all import *
from PIL import Image
# Cell
#nbdev_comment _all_ = ['Image','ToTensor']
# Cell
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
cifar_stats = ([0.491, 0.482, 0.447], [0.247, 0.243, 0.261])
mnist_stats = ([0.131], [0.308])
# Cell
if not hasattr(Image,'_patched'):
_old_sz = Image.Image.size.fget
@patch(as_prop=True)
def size(x:Image.Image): return fastuple(_old_sz(x))
Image._patched = True
# Cell
@patch(as_prop=True)
def n_px(x: Image.Image): return x.size[0] * x.size[1]
# Cell
@patch(as_prop=True)
def shape(x: Image.Image): return x.size[1],x.size[0]
# Cell
@patch(as_prop=True)
def aspect(x: Image.Image): return x.size[0]/x.size[1]
# Cell
@patch
def reshape(x: Image.Image, h, w, resample=0):
"`resize` `x` to `(w,h)`"
return x.resize((w,h), resample=resample)
# Cell
@patch
def to_bytes_format(im:Image.Image, format='png'):
"Convert to bytes, default to PNG format"
arr = io.BytesIO()
im.save(arr, format=format)
return arr.getvalue()
# Cell
@patch
def to_thumb(self:Image.Image, h, w=None):
"Same as `thumbnail`, but uses a copy"
if w is None: w=h
im = self.copy()
im.thumbnail((w,h))
return im
# Cell
@patch
def resize_max(x: Image.Image, resample=0, max_px=None, max_h=None, max_w=None):
"`resize` `x` to `max_px`, or `max_h`, or `max_w`"
h,w = x.shape
if max_px and x.n_px>max_px: h,w = fastuple(h,w).mul(math.sqrt(max_px/x.n_px))
if max_h and h>max_h: h,w = (max_h ,max_h*w/h)
if max_w and w>max_w: h,w = (max_w*h/w,max_w )
return x.reshape(round(h), round(w), resample=resample)
# Cell
def to_image(x):
"Convert a tensor or array to a PIL int8 Image"
if isinstance(x,Image.Image): return x
if isinstance(x,Tensor): x = to_np(x.permute((1,2,0)))
if x.dtype==np.float32: x = (x*255).astype(np.uint8)
    return Image.fromarray(x, mode=['RGB','CMYK'][x.shape[-1]==4])  # channels are in the last dim (HWC) at this point
# Cell
def load_image(fn, mode=None):
"Open and load a `PIL.Image` and convert to `mode`"
im = Image.open(fn)
im.load()
im = im._new(im.im)
return im.convert(mode) if mode else im
# Cell
def image2tensor(img):
"Transform image to byte tensor in `c*h*w` dim order."
res = tensor(img)
if res.dim()==2: res = res.unsqueeze(-1)
return res.permute(2,0,1)
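# A small round-trip sketch: `image2tensor` yields a uint8 CHW tensor from a PIL image and
# `to_image` converts back (float32 tensors in [0,1] are rescaled to uint8 first).
def _demo_image_roundtrip(img):
    t = image2tensor(img)   # shape (c, h, w), dtype uint8
    return to_image(t)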
# Cell
class PILBase(Image.Image, metaclass=BypassNewMeta):
_bypass_type=Image.Image
_show_args = {'cmap':'viridis'}
_open_args = {'mode': 'RGB'}
@classmethod
def create(cls, fn:(Path,str,Tensor,ndarray,bytes), **kwargs)->None:
"Open an `Image` from path `fn`"
if isinstance(fn,TensorImage): fn = fn.permute(1,2,0).type(torch.uint8)
if isinstance(fn, TensorMask): fn = fn.type(torch.uint8)
if isinstance(fn,Tensor): fn = fn.numpy()
if isinstance(fn,ndarray): return cls(Image.fromarray(fn))
if isinstance(fn,bytes): fn = io.BytesIO(fn)
return cls(load_image(fn, **merge(cls._open_args, kwargs)))
def show(self, ctx=None, **kwargs):
"Show image using `merge(self._show_args, kwargs)`"
return show_image(self, ctx=ctx, **merge(self._show_args, kwargs))
def __repr__(self): return f'{self.__class__.__name__} mode={self.mode} size={"x".join([str(d) for d in self.size])}'
# Cell
class PILImage(PILBase): pass
# Cell
class PILImageBW(PILImage): _show_args,_open_args = {'cmap':'Greys'},{'mode': 'L'}
# Cell
class PILMask(PILBase): _open_args,_show_args = {'mode':'L'},{'alpha':0.5, 'cmap':'tab20'}
# Cell
OpenMask = Transform(PILMask.create)
OpenMask.loss_func = CrossEntropyLossFlat(axis=1)
PILMask.create = OpenMask
# Cell
class AddMaskCodes(Transform):
"Add the code metadata to a `TensorMask`"
def __init__(self, codes=None):
self.codes = codes
if codes is not None: self.vocab,self.c = codes,len(codes)
def decodes(self, o:TensorMask):
if self.codes is not None: o.codes=self.codes
return o
# Cell
class TensorPoint(TensorBase):
"Basic type for points in an image"
_show_args = dict(s=10, marker='.', c='r')
@classmethod
def create(cls, t, img_size=None)->None:
"Convert an array or a list of points `t` to a `Tensor`"
return cls(tensor(t).view(-1, 2).float(), img_size=img_size)
def show(self, ctx=None, **kwargs):
if 'figsize' in kwargs: del kwargs['figsize']
x = self.view(-1,2)
ctx.scatter(x[:, 0], x[:, 1], **{**self._show_args, **kwargs})
return ctx
# Cell
TensorPointCreate = Transform(TensorPoint.create)
TensorPointCreate.loss_func = MSELossFlat()
TensorPoint.create = TensorPointCreate
# Cell
def get_annotations(fname, prefix=None):
    "Open a COCO-style json in `fname` and return the lists of filenames (optionally prefixed with `prefix`) and labelled bboxes."
annot_dict = json.load(open(fname))
id2images, id2bboxes, id2cats = {}, collections.defaultdict(list), collections.defaultdict(list)
classes = {o['id']:o['name'] for o in annot_dict['categories']}
for o in annot_dict['annotations']:
bb = o['bbox']
id2bboxes[o['image_id']].append([bb[0],bb[1], bb[0]+bb[2], bb[1]+bb[3]])
id2cats[o['image_id']].append(classes[o['category_id']])
id2images = {o['id']:ifnone(prefix, '') + o['file_name'] for o in annot_dict['images'] if o['id'] in id2bboxes}
ids = list(id2images.keys())
return [id2images[k] for k in ids], [(id2bboxes[k], id2cats[k]) for k in ids]
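# A minimal usage sketch for `get_annotations` (the COCO_TINY dataset below is just an
# illustrative assumption; any COCO-style json works):
def _demo_get_annotations():
    path = untar_data(URLs.COCO_TINY)
    images, lbl_bbox = get_annotations(path/'train.json')
    return images[0], lbl_bbox[0]   # (filename, ([bboxes], [labels]))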
# Cell
from matplotlib import patches, patheffects
# Cell
def _draw_outline(o, lw):
o.set_path_effects([patheffects.Stroke(linewidth=lw, foreground='black'), patheffects.Normal()])
def _draw_rect(ax, b, color='white', text=None, text_size=14, hw=True, rev=False):
lx,ly,w,h = b
if rev: lx,ly,w,h = ly,lx,h,w
if not hw: w,h = w-lx,h-ly
patch = ax.add_patch(patches.Rectangle((lx,ly), w, h, fill=False, edgecolor=color, lw=2))
_draw_outline(patch, 4)
if text is not None:
patch = ax.text(lx,ly, text, verticalalignment='top', color=color, fontsize=text_size, weight='bold')
_draw_outline(patch,1)
# Cell
class TensorBBox(TensorPoint):
"Basic type for a tensor of bounding boxes in an image"
@classmethod
def create(cls, x, img_size=None)->None: return cls(tensor(x).view(-1, 4).float(), img_size=img_size)
def show(self, ctx=None, **kwargs):
x = self.view(-1,4)
for b in x: _draw_rect(ctx, b, hw=False, **kwargs)
return ctx
# Cell
class LabeledBBox(L):
"Basic type for a list of bounding boxes in an image"
def show(self, ctx=None, **kwargs):
for b,l in zip(self.bbox, self.lbl):
if l != '#na#': ctx = retain_type(b, self.bbox).show(ctx=ctx, text=l)
return ctx
bbox,lbl = add_props(lambda i,self: self[i])
# Cell
PILImage ._tensor_cls = TensorImage
PILImageBW._tensor_cls = TensorImageBW
PILMask ._tensor_cls = TensorMask
# Cell
@ToTensor
def encodes(self, o:PILBase): return o._tensor_cls(image2tensor(o))
@ToTensor
def encodes(self, o:PILMask): return o._tensor_cls(image2tensor(o)[0])
# Cell
def _scale_pnts(y, sz, do_scale=True, y_first=False):
if y_first: y = y.flip(1)
res = y * 2/tensor(sz).float() - 1 if do_scale else y
return TensorPoint(res, img_size=sz)
def _unscale_pnts(y, sz): return TensorPoint((y+1) * tensor(sz).float()/2, img_size=sz)
# Cell
class PointScaler(Transform):
"Scale a tensor representing points"
order = 1
def __init__(self, do_scale=True, y_first=False): self.do_scale,self.y_first = do_scale,y_first
def _grab_sz(self, x):
self.sz = [x.shape[-1], x.shape[-2]] if isinstance(x, Tensor) else x.size
return x
def _get_sz(self, x): return getattr(x, 'img_size') if self.sz is None else self.sz
def setups(self, dl):
res = first(dl.do_item(0), risinstance(TensorPoint))
if res is not None: self.c = res.numel()
def encodes(self, x:(PILBase,TensorImageBase)): return self._grab_sz(x)
def decodes(self, x:(PILBase,TensorImageBase)): return self._grab_sz(x)
def encodes(self, x:TensorPoint): return _scale_pnts(x, self._get_sz(x), self.do_scale, self.y_first)
def decodes(self, x:TensorPoint): return _unscale_pnts(x.view(-1, 2), self._get_sz(x))
# Cell
class BBoxLabeler(Transform):
def setups(self, dl): self.vocab = dl.vocab
def decode (self, x, **kwargs):
self.bbox,self.lbls = None,None
return self._call('decodes', x, **kwargs)
def decodes(self, x:TensorMultiCategory):
self.lbls = [self.vocab[a] for a in x]
return x if self.bbox is None else LabeledBBox(self.bbox, self.lbls)
def decodes(self, x:TensorBBox):
self.bbox = x
return self.bbox if self.lbls is None else LabeledBBox(self.bbox, self.lbls)
# Cell
#LabeledBBox can be sent in a tl with MultiCategorize (depending on the order of the tls) but it is already decoded.
@MultiCategorize
def decodes(self, x:LabeledBBox): return x
# Cell
@PointScaler
def encodes(self, x:TensorBBox):
pnts = self.encodes(cast(x.view(-1,2), TensorPoint))
return cast(pnts.view(-1, 4), TensorBBox)
@PointScaler
def decodes(self, x:TensorBBox):
pnts = self.decodes(cast(x.view(-1,2), TensorPoint))
return cast(pnts.view(-1, 4), TensorBBox)
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/core.py
|
core.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/09b_vision.utils.ipynb (unless otherwise specified).
__all__ = ['download_images', 'resize_to', 'verify_image', 'verify_images', 'resize_image', 'resize_images']
# Cell
from ..torch_basics import *
from ..data.all import *
from .core import *
from pathlib import Path
# Cell
def _get_downloaded_image_filename(dest, name, suffix):
start_index = 1
candidate_name = name
while (dest/f"{candidate_name}{suffix}").is_file():
candidate_name = f"{candidate_name}{start_index}"
start_index += 1
return candidate_name
# Cell
def _download_image_inner(dest, inp, timeout=4, preserve_filename=False):
i,url = inp
url_path = Path(url)
suffix = url_path.suffix if url_path.suffix else '.jpg'
name = _get_downloaded_image_filename(dest, url_path.stem, suffix) if preserve_filename else f"{i:08d}"
try: download_url(url, dest/f"{name}{suffix}", overwrite=True, show_progress=False, timeout=timeout)
except Exception as e: f"Couldn't download {url}."
# Cell
def download_images(dest, url_file=None, urls=None, max_pics=1000, n_workers=8, timeout=4, preserve_filename=False):
"Download images listed in text file `url_file` to path `dest`, at most `max_pics`"
if urls is None: urls = url_file.read_text().strip().split("\n")[:max_pics]
dest = Path(dest)
dest.mkdir(exist_ok=True)
parallel(partial(_download_image_inner, dest, timeout=timeout, preserve_filename=preserve_filename),
list(enumerate(urls)), n_workers=n_workers)
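# Example (minimal sketch; the URLs below are placeholders):
#   urls = ['https://example.com/cat1.jpg', 'https://example.com/cat2.jpg']
#   download_images('images/cats', urls=urls, preserve_filename=True)
#   # failed downloads are skipped; run `verify_images` on the folder afterwards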
# Cell
def resize_to(img, targ_sz, use_min=False):
"Size to resize to, to hit `targ_sz` at same aspect ratio, in PIL coords (i.e w*h)"
w,h = img.size
min_sz = (min if use_min else max)(w,h)
ratio = targ_sz/min_sz
return int(w*ratio),int(h*ratio)
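# Example (minimal sketch): for a 1024x768 PIL image,
#   resize_to(img, 224)                # -> (224, 168): the larger side becomes 224
#   resize_to(img, 224, use_min=True)  # -> (298, 224): the smaller side becomes 224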
# Cell
def verify_image(fn):
"Confirm that `fn` can be opened"
try:
im = Image.open(fn)
im.draft(im.mode, (32,32))
im.load()
return True
except: return False
# Cell
def verify_images(fns):
"Find images in `fns` that can't be opened"
return L(fns[i] for i,o in enumerate(parallel(verify_image, fns)) if not o)
# Cell
def resize_image(file, dest, max_size=None, n_channels=3, ext=None,
img_format=None, resample=Image.BILINEAR, resume=False, **kwargs ):
"Resize file to dest to max_size"
dest = Path(dest)
dest_fname = dest/file.name
if resume and dest_fname.exists(): return
if verify_image(file):
img = Image.open(file)
imgarr = np.array(img)
img_channels = 1 if len(imgarr.shape) == 2 else imgarr.shape[2]
if (max_size is not None and (img.height > max_size or img.width > max_size)) or img_channels != n_channels:
if ext is not None: dest_fname=dest_fname.with_suffix(ext)
if max_size is not None:
new_sz = resize_to(img, max_size)
img = img.resize(new_sz, resample=resample)
if n_channels == 3: img = img.convert("RGB")
img.save(dest_fname, img_format, **kwargs)
# Cell
def resize_images(path, max_workers=defaults.cpus, max_size=None, recurse=False,
dest=Path('.'), n_channels=3, ext=None, img_format=None, resample=Image.BILINEAR,
resume=None, **kwargs):
"Resize files on path recursively to dest to max_size"
path = Path(path)
if resume is None and dest != Path('.'): resume=False
os.makedirs(dest, exist_ok=True)
files = get_image_files(path, recurse=recurse)
parallel(resize_image, files, max_workers=max_workers, max_size=max_size, dest=dest, n_channels=n_channels, ext=ext,
img_format=img_format, resample=resample, resume=resume, **kwargs)
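# Example (minimal sketch; paths are placeholders):
#   resize_images('images/cats', dest='images/cats_small', max_size=400, recurse=True)
#   # every image whose larger side exceeds 400px is saved, scaled down, into `dest`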
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/utils.py
|
utils.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/09c_vision.widgets.ipynb (unless otherwise specified).
__all__ = ['HBox', 'VBox', 'widgets', 'Button', 'Checkbox', 'Dropdown', 'Layout', 'Box', 'Output', 'Label',
'FileUpload', 'widget', 'carousel', 'ImagesCleaner', 'ImageClassifierCleaner']
# Cell
from ..torch_basics import *
from ..data.all import *
from .core import *
from ipywidgets import HBox,VBox,widgets,Button,Checkbox,Dropdown,Layout,Box,Output,Label,FileUpload
# Cell
#nbdev_comment _all_ = ['HBox','VBox','widgets','Button','Checkbox','Dropdown','Layout','Box','Output','Label','FileUpload']
# Cell
@patch
def __getitem__(self:Box, i): return self.children[i]
# Cell
def widget(im, *args, **layout):
"Convert anything that can be `display`ed by IPython into a widget"
o = Output(layout=merge(*args, layout))
with o: display(im)
return o
# Cell
def _update_children(change):
for o in change['owner'].children:
if not o.layout.flex: o.layout.flex = '0 0 auto'
# Cell
def carousel(children=(), **layout):
"A horizontally scrolling carousel"
def_layout = dict(overflow='scroll hidden', flex_flow='row', display='flex')
res = Box([], layout=merge(def_layout, layout))
res.observe(_update_children, names='children')
res.children = children
return res
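# Example (minimal sketch, assuming `ims` is a list of PIL images):
#   boxes = [widget(im, max_width='192px') for im in ims]
#   carousel(boxes, width='100%')   # a horizontally scrolling row of image widgets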
# Cell
def _open_thumb(fn, h, w): return Image.open(fn).to_thumb(h, w).convert('RGBA')
# Cell
class ImagesCleaner:
"A widget that displays all images in `fns` along with a `Dropdown`"
def __init__(self, opts=(), height=128, width=256, max_n=30):
opts = ('<Keep>', '<Delete>')+tuple(opts)
store_attr('opts,height,width,max_n')
self.widget = carousel(width='100%')
def set_fns(self, fns):
self.fns = L(fns)[:self.max_n]
ims = parallel(_open_thumb, self.fns, h=self.height, w=self.width, progress=False,
n_workers=min(len(self.fns)//10,defaults.cpus))
self.widget.children = [VBox([widget(im, height=f'{self.height}px'), Dropdown(
options=self.opts, layout={'width': 'max-content'})]) for im in ims]
def _ipython_display_(self): display(self.widget)
def values(self): return L(self.widget.children).itemgot(1).attrgot('value')
def delete(self): return self.values().argwhere(eq('<Delete>'))
def change(self):
idxs = self.values().argwhere(not_(in_(['<Delete>','<Keep>'])))
return idxs.zipwith(self.values()[idxs])
# Cell
def _get_iw_info(learn, ds_idx=0):
dl = learn.dls[ds_idx].new(shuffle=False, drop_last=False)
inp,probs,targs,preds,losses = learn.get_preds(dl=dl, with_input=True, with_loss=True, with_decoded=True)
inp,targs = L(zip(*dl.decode_batch((inp,targs), max_n=9999)))
return L([dl.dataset.items,targs,losses]).zip()
# Cell
@delegates(ImagesCleaner)
class ImageClassifierCleaner(GetAttr):
"A widget that provides an `ImagesCleaner` with a CNN `Learner`"
def __init__(self, learn, **kwargs):
vocab = learn.dls.vocab
self.default = self.iw = ImagesCleaner(vocab, **kwargs)
self.dd_cats = Dropdown(options=vocab)
self.dd_ds = Dropdown(options=('Train','Valid'))
self.iwis = _get_iw_info(learn,0),_get_iw_info(learn,1)
self.dd_ds.observe(self.on_change_ds, 'value')
self.dd_cats.observe(self.on_change_ds, 'value')
self.on_change_ds()
self.widget = VBox([self.dd_cats, self.dd_ds, self.iw.widget])
def _ipython_display_(self): display(self.widget)
def on_change_ds(self, change=None):
info = L(o for o in self.iwis[self.dd_ds.index] if o[1]==self.dd_cats.value)
self.iw.set_fns(info.sorted(2, reverse=True).itemgot(0))
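# Example (minimal sketch of the usual cleaning workflow, assuming `learn` is a trained
# classifier and `path` the folder holding its images):
#   cleaner = ImageClassifierCleaner(learn)
#   cleaner                                    # display the widget and pick <Delete>/new labels
#   for idx in cleaner.delete(): cleaner.fns[idx].unlink()
#   for idx,cat in cleaner.change(): shutil.move(str(cleaner.fns[idx]), path/cat)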
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/widgets.py
|
widgets.py
|
from .all import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/__init__.py
|
__init__.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/24_vision.gan.ipynb (unless otherwise specified).
__all__ = ['GANModule', 'basic_critic', 'AddChannels', 'basic_generator', 'DenseResBlock', 'gan_critic', 'GANLoss',
'AdaptiveLoss', 'accuracy_thresh_expand', 'set_freeze_model', 'GANTrainer', 'FixedGANSwitcher',
'AdaptiveGANSwitcher', 'GANDiscriminativeLR', 'InvisibleTensor', 'generate_noise', 'gan_loss_from_func',
'GANLearner']
# Cell
from ..basics import *
from .all import *
# Cell
class GANModule(Module):
"Wrapper around a `generator` and a `critic` to create a GAN."
def __init__(self, generator=None, critic=None, gen_mode=False):
if generator is not None: self.generator=generator
if critic is not None: self.critic =critic
store_attr('gen_mode')
def forward(self, *args):
return self.generator(*args) if self.gen_mode else self.critic(*args)
def switch(self, gen_mode=None):
"Put the module in generator mode if `gen_mode`, in critic mode otherwise."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
# Cell
@delegates(ConvLayer.__init__)
def basic_critic(in_size, n_channels, n_features=64, n_extra_layers=0, norm_type=NormType.Batch, **kwargs):
"A basic critic for images `n_channels` x `in_size` x `in_size`."
layers = [ConvLayer(n_channels, n_features, 4, 2, 1, norm_type=None, **kwargs)]
cur_size, cur_ftrs = in_size//2, n_features
layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, norm_type=norm_type, **kwargs) for _ in range(n_extra_layers)]
while cur_size > 4:
layers.append(ConvLayer(cur_ftrs, cur_ftrs*2, 4, 2, 1, norm_type=norm_type, **kwargs))
cur_ftrs *= 2 ; cur_size //= 2
init = kwargs.get('init', nn.init.kaiming_normal_)
layers += [init_default(nn.Conv2d(cur_ftrs, 1, 4, padding=0), init), Flatten()]
return nn.Sequential(*layers)
# Cell
class AddChannels(Module):
"Add `n_dim` channels at the end of the input."
def __init__(self, n_dim): self.n_dim=n_dim
def forward(self, x): return x.view(*(list(x.shape)+[1]*self.n_dim))
# Cell
@delegates(ConvLayer.__init__)
def basic_generator(out_size, n_channels, in_sz=100, n_features=64, n_extra_layers=0, **kwargs):
"A basic generator from `in_sz` to images `n_channels` x `out_size` x `out_size`."
cur_size, cur_ftrs = 4, n_features//2
while cur_size < out_size: cur_size *= 2; cur_ftrs *= 2
layers = [AddChannels(2), ConvLayer(in_sz, cur_ftrs, 4, 1, transpose=True, **kwargs)]
cur_size = 4
while cur_size < out_size // 2:
layers.append(ConvLayer(cur_ftrs, cur_ftrs//2, 4, 2, 1, transpose=True, **kwargs))
cur_ftrs //= 2; cur_size *= 2
layers += [ConvLayer(cur_ftrs, cur_ftrs, 3, 1, 1, transpose=True, **kwargs) for _ in range(n_extra_layers)]
layers += [nn.ConvTranspose2d(cur_ftrs, n_channels, 4, 2, 1, bias=False), nn.Tanh()]
return nn.Sequential(*layers)
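# Example (minimal sketch): a 64px generator/critic pair for 3-channel images.
#   generator = basic_generator(64, n_channels=3, in_sz=100)
#   critic    = basic_critic(64, n_channels=3)
#   fake = generator(torch.randn(16, 100))     # -> shape (16, 3, 64, 64)
#   critic(fake).shape                         # -> (16, 1): one score per image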
# Cell
_conv_args = dict(act_cls = partial(nn.LeakyReLU, negative_slope=0.2), norm_type=NormType.Spectral)
def _conv(ni, nf, ks=3, stride=1, self_attention=False, **kwargs):
if self_attention: kwargs['xtra'] = SelfAttention(nf)
return ConvLayer(ni, nf, ks=ks, stride=stride, **_conv_args, **kwargs)
# Cell
@delegates(ConvLayer)
def DenseResBlock(nf, norm_type=NormType.Batch, **kwargs):
"Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`."
return SequentialEx(ConvLayer(nf, nf, norm_type=norm_type, **kwargs),
ConvLayer(nf, nf, norm_type=norm_type, **kwargs),
MergeLayer(dense=True))
# Cell
def gan_critic(n_channels=3, nf=128, n_blocks=3, p=0.15):
"Critic to train a `GAN`."
layers = [
_conv(n_channels, nf, ks=4, stride=2),
nn.Dropout2d(p/2),
DenseResBlock(nf, **_conv_args)]
nf *= 2 # after dense block
for i in range(n_blocks):
layers += [
nn.Dropout2d(p),
_conv(nf, nf*2, ks=4, stride=2, self_attention=(i==0))]
nf *= 2
layers += [
ConvLayer(nf, 1, ks=4, bias=False, padding=0, norm_type=NormType.Spectral, act_cls=None),
Flatten()]
return nn.Sequential(*layers)
# Cell
class GANLoss(GANModule):
"Wrapper around `crit_loss_func` and `gen_loss_func`"
def __init__(self, gen_loss_func, crit_loss_func, gan_model):
super().__init__()
store_attr('gen_loss_func,crit_loss_func,gan_model')
def generator(self, output, target):
"Evaluate the `output` with the critic then uses `self.gen_loss_func`"
fake_pred = self.gan_model.critic(output)
self.gen_loss = self.gen_loss_func(fake_pred, output, target)
return self.gen_loss
def critic(self, real_pred, input):
"Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.crit_loss_func`."
fake = self.gan_model.generator(input).requires_grad_(False)
fake_pred = self.gan_model.critic(fake)
self.crit_loss = self.crit_loss_func(real_pred, fake_pred)
return self.crit_loss
# Cell
class AdaptiveLoss(Module):
"Expand the `target` to match the `output` size before applying `crit`."
def __init__(self, crit): self.crit = crit
def forward(self, output, target):
return self.crit(output, target[:,None].expand_as(output).float())
# Cell
def accuracy_thresh_expand(y_pred, y_true, thresh=0.5, sigmoid=True):
"Compute accuracy after expanding `y_true` to the size of `y_pred`."
if sigmoid: y_pred = y_pred.sigmoid()
return ((y_pred>thresh).byte()==y_true[:,None].expand_as(y_pred).byte()).float().mean()
# Cell
def set_freeze_model(m, rg):
for p in m.parameters(): p.requires_grad_(rg)
# Cell
class GANTrainer(Callback):
"Handles GAN Training."
run_after = TrainEvalCallback
def __init__(self, switch_eval=False, clip=None, beta=0.98, gen_first=False, show_img=True):
store_attr('switch_eval,clip,gen_first,show_img')
self.gen_loss,self.crit_loss = AvgSmoothLoss(beta=beta),AvgSmoothLoss(beta=beta)
def _set_trainable(self):
train_model = self.generator if self.gen_mode else self.critic
loss_model = self.generator if not self.gen_mode else self.critic
set_freeze_model(train_model, True)
set_freeze_model(loss_model, False)
if self.switch_eval:
train_model.train()
loss_model.eval()
def before_fit(self):
"Initialize smootheners."
self.generator,self.critic = self.model.generator,self.model.critic
self.gen_mode = self.gen_first
self.switch(self.gen_mode)
self.crit_losses,self.gen_losses = [],[]
self.gen_loss.reset() ; self.crit_loss.reset()
#self.recorder.no_val=True
#self.recorder.add_metric_names(['gen_loss', 'disc_loss'])
#self.imgs,self.titles = [],[]
def before_validate(self):
"Switch in generator mode for showing results."
self.switch(gen_mode=True)
def before_batch(self):
"Clamp the weights with `self.clip` if it's not None, set the correct input/target."
if self.training and self.clip is not None:
for p in self.critic.parameters(): p.data.clamp_(-self.clip, self.clip)
if not self.gen_mode:
(self.learn.xb,self.learn.yb) = (self.yb,self.xb)
def after_batch(self):
"Record `last_loss` in the proper list."
if not self.training: return
if self.gen_mode:
self.gen_loss.accumulate(self.learn)
self.gen_losses.append(self.gen_loss.value)
self.last_gen = self.learn.to_detach(self.pred)
else:
self.crit_loss.accumulate(self.learn)
self.crit_losses.append(self.crit_loss.value)
def before_epoch(self):
"Put the critic or the generator back to eval if necessary."
self.switch(self.gen_mode)
#def after_epoch(self):
# "Show a sample image."
# if not hasattr(self, 'last_gen') or not self.show_img: return
# data = self.learn.data
# img = self.last_gen[0]
# norm = getattr(data,'norm',False)
# if norm and norm.keywords.get('do_y',False): img = data.denorm(img)
# img = data.train_ds.y.reconstruct(img)
# self.imgs.append(img)
# self.titles.append(f'Epoch {epoch}')
# pbar.show_imgs(self.imgs, self.titles)
# return add_metrics(last_metrics, [getattr(self.smoothenerG,'smooth',None),getattr(self.smoothenerC,'smooth',None)])
def switch(self, gen_mode=None):
"Switch the model and loss function, if `gen_mode` is provided, in the desired mode."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
self._set_trainable()
self.model.switch(gen_mode)
self.loss_func.switch(gen_mode)
# Cell
class FixedGANSwitcher(Callback):
"Switcher to do `n_crit` iterations of the critic then `n_gen` iterations of the generator."
run_after = GANTrainer
def __init__(self, n_crit=1, n_gen=1): store_attr('n_crit,n_gen')
def before_train(self): self.n_c,self.n_g = 0,0
def after_batch(self):
"Switch the model if necessary."
if not self.training: return
if self.learn.gan_trainer.gen_mode:
self.n_g += 1
n_iter,n_in,n_out = self.n_gen,self.n_c,self.n_g
else:
self.n_c += 1
n_iter,n_in,n_out = self.n_crit,self.n_g,self.n_c
target = n_iter if isinstance(n_iter, int) else n_iter(n_in)
if target == n_out:
self.learn.gan_trainer.switch()
self.n_c,self.n_g = 0,0
# Cell
class AdaptiveGANSwitcher(Callback):
"Switcher that goes back to generator/critic when the loss goes below `gen_thresh`/`crit_thresh`."
run_after = GANTrainer
def __init__(self, gen_thresh=None, critic_thresh=None):
store_attr('gen_thresh,critic_thresh')
def after_batch(self):
"Switch the model if necessary."
if not self.training: return
if self.gan_trainer.gen_mode:
if self.gen_thresh is None or self.loss < self.gen_thresh: self.gan_trainer.switch()
else:
if self.critic_thresh is None or self.loss < self.critic_thresh: self.gan_trainer.switch()
# Cell
class GANDiscriminativeLR(Callback):
"`Callback` that handles multiplying the learning rate by `mult_lr` for the critic."
run_after = GANTrainer
def __init__(self, mult_lr=5.): self.mult_lr = mult_lr
def before_batch(self):
"Multiply the current lr if necessary."
if not self.learn.gan_trainer.gen_mode and self.training:
self.learn.opt.set_hyper('lr', self.learn.opt.hypers[0]['lr']*self.mult_lr)
def after_batch(self):
"Put the LR back to its value if necessary."
if not self.learn.gan_trainer.gen_mode: self.learn.opt.set_hyper('lr', self.learn.opt.hypers[0]['lr']/self.mult_lr)
# Cell
class InvisibleTensor(TensorBase):
def show(self, ctx=None, **kwargs): return ctx
# Cell
def generate_noise(fn, size=100): return cast(torch.randn(size), InvisibleTensor)
# Cell
@typedispatch
def show_batch(x:InvisibleTensor, y:TensorImage, samples, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, figsize=figsize)
ctxs = show_batch[object](x, y, samples, ctxs=ctxs, max_n=max_n, **kwargs)
return ctxs
# Cell
@typedispatch
def show_results(x:InvisibleTensor, y:TensorImage, samples, outs, ctxs=None, max_n=10, nrows=None, ncols=None, figsize=None, **kwargs):
if ctxs is None: ctxs = get_grid(min(len(samples), max_n), nrows=nrows, ncols=ncols, add_vert=1, figsize=figsize)
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs,range(max_n))]
return ctxs
# Cell
def gan_loss_from_func(loss_gen, loss_crit, weights_gen=None):
"Define loss functions for a GAN from `loss_gen` and `loss_crit`."
def _loss_G(fake_pred, output, target, weights_gen=weights_gen):
ones = fake_pred.new_ones(fake_pred.shape[0])
weights_gen = ifnone(weights_gen, (1.,1.))
return weights_gen[0] * loss_crit(fake_pred, ones) + weights_gen[1] * loss_gen(output, target)
def _loss_C(real_pred, fake_pred):
ones = real_pred.new_ones (real_pred.shape[0])
zeros = fake_pred.new_zeros(fake_pred.shape[0])
return (loss_crit(real_pred, ones) + loss_crit(fake_pred, zeros)) / 2
return _loss_G, _loss_C
# Cell
def _tk_mean(fake_pred, output, target): return fake_pred.mean()
def _tk_diff(real_pred, fake_pred): return real_pred.mean() - fake_pred.mean()
# Cell
@delegates()
class GANLearner(Learner):
"A `Learner` suitable for GANs."
def __init__(self, dls, generator, critic, gen_loss_func, crit_loss_func, switcher=None, gen_first=False,
switch_eval=True, show_img=True, clip=None, cbs=None, metrics=None, **kwargs):
gan = GANModule(generator, critic)
loss_func = GANLoss(gen_loss_func, crit_loss_func, gan)
if switcher is None: switcher = FixedGANSwitcher(n_crit=5, n_gen=1)
trainer = GANTrainer(clip=clip, switch_eval=switch_eval, gen_first=gen_first, show_img=show_img)
cbs = L(cbs) + L(trainer, switcher)
metrics = L(metrics) + L(*LossMetrics('gen_loss,crit_loss'))
super().__init__(dls, gan, loss_func=loss_func, cbs=cbs, metrics=metrics, **kwargs)
@classmethod
def from_learners(cls, gen_learn, crit_learn, switcher=None, weights_gen=None, **kwargs):
"Create a GAN from `learn_gen` and `learn_crit`."
losses = gan_loss_from_func(gen_learn.loss_func, crit_learn.loss_func, weights_gen=weights_gen)
return cls(gen_learn.dls, gen_learn.model, crit_learn.model, *losses, switcher=switcher, **kwargs)
@classmethod
def wgan(cls, dls, generator, critic, switcher=None, clip=0.01, switch_eval=False, **kwargs):
"Create a WGAN from `data`, `generator` and `critic`."
return cls(dls, generator, critic, _tk_mean, _tk_diff, switcher=switcher, clip=clip, switch_eval=switch_eval, **kwargs)
GANLearner.from_learners = delegates(to=GANLearner.__init__)(GANLearner.from_learners)
GANLearner.wgan = delegates(to=GANLearner.__init__)(GANLearner.wgan)
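# Example (minimal sketch of a WGAN setup, assuming `dls` yields (noise, 64px image) pairs,
# e.g. built with `generate_noise` as the input transform):
#   generator = basic_generator(64, n_channels=3, n_extra_layers=1)
#   critic    = basic_critic(64, n_channels=3, n_extra_layers=1,
#                            act_cls=partial(nn.LeakyReLU, negative_slope=0.2))
#   learn = GANLearner.wgan(dls, generator, critic, opt_func=partial(Adam, mom=0.))
#   learn.fit(1, 2e-4, wd=0.)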
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/gan.py
|
gan.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/15a_vision.models.unet.ipynb (unless otherwise specified).
__all__ = ['UnetBlock', 'ResizeToOrig', 'DynamicUnet']
# Cell
from ...torch_basics import *
from ...callback.hook import *
# Cell
def _get_sz_change_idxs(sizes):
"Get the indexes of the layers where the size of the activation changes."
feature_szs = [size[-1] for size in sizes]
sz_chg_idxs = list(np.where(np.array(feature_szs[:-1]) != np.array(feature_szs[1:]))[0])
return sz_chg_idxs
# Cell
class UnetBlock(Module):
"A quasi-UNet block, using `PixelShuffle_ICNR upsampling`."
@delegates(ConvLayer.__init__)
def __init__(self, up_in_c, x_in_c, hook, final_div=True, blur=False, act_cls=defaults.activation,
self_attention=False, init=nn.init.kaiming_normal_, norm_type=None, **kwargs):
self.hook = hook
self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, act_cls=act_cls, norm_type=norm_type)
self.bn = BatchNorm(x_in_c)
ni = up_in_c//2 + x_in_c
nf = ni if final_div else ni//2
self.conv1 = ConvLayer(ni, nf, act_cls=act_cls, norm_type=norm_type, **kwargs)
self.conv2 = ConvLayer(nf, nf, act_cls=act_cls, norm_type=norm_type,
xtra=SelfAttention(nf) if self_attention else None, **kwargs)
self.relu = act_cls()
apply_init(nn.Sequential(self.conv1, self.conv2), init)
def forward(self, up_in):
s = self.hook.stored
up_out = self.shuf(up_in)
ssh = s.shape[-2:]
if ssh != up_out.shape[-2:]:
up_out = F.interpolate(up_out, s.shape[-2:], mode='nearest')
cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1))
return self.conv2(self.conv1(cat_x))
# Cell
class ResizeToOrig(Module):
"Merge a shortcut with the result of the module by adding them or concatenating them if `dense=True`."
def __init__(self, mode='nearest'): self.mode = mode
def forward(self, x):
if x.orig.shape[-2:] != x.shape[-2:]:
x = F.interpolate(x, x.orig.shape[-2:], mode=self.mode)
return x
# Cell
class DynamicUnet(SequentialEx):
"Create a U-Net from a given architecture."
def __init__(self, encoder, n_out, img_size, blur=False, blur_final=True, self_attention=False,
y_range=None, last_cross=True, bottle=False, act_cls=defaults.activation,
init=nn.init.kaiming_normal_, norm_type=None, **kwargs):
imsize = img_size
sizes = model_sizes(encoder, size=imsize)
sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes)))
self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs], detach=False)
x = dummy_eval(encoder, imsize).detach()
ni = sizes[-1][1]
middle_conv = nn.Sequential(ConvLayer(ni, ni*2, act_cls=act_cls, norm_type=norm_type, **kwargs),
ConvLayer(ni*2, ni, act_cls=act_cls, norm_type=norm_type, **kwargs)).eval()
x = middle_conv(x)
layers = [encoder, BatchNorm(ni), nn.ReLU(), middle_conv]
for i,idx in enumerate(sz_chg_idxs):
not_final = i!=len(sz_chg_idxs)-1
up_in_c, x_in_c = int(x.shape[1]), int(sizes[idx][1])
do_blur = blur and (not_final or blur_final)
sa = self_attention and (i==len(sz_chg_idxs)-3)
unet_block = UnetBlock(up_in_c, x_in_c, self.sfs[i], final_div=not_final, blur=do_blur, self_attention=sa,
act_cls=act_cls, init=init, norm_type=norm_type, **kwargs).eval()
layers.append(unet_block)
x = unet_block(x)
ni = x.shape[1]
if imsize != sizes[0][-2:]: layers.append(PixelShuffle_ICNR(ni, act_cls=act_cls, norm_type=norm_type))
layers.append(ResizeToOrig())
if last_cross:
layers.append(MergeLayer(dense=True))
ni += in_channels(encoder)
layers.append(ResBlock(1, ni, ni//2 if bottle else ni, act_cls=act_cls, norm_type=norm_type, **kwargs))
layers += [ConvLayer(ni, n_out, ks=1, act_cls=None, norm_type=norm_type, **kwargs)]
apply_init(nn.Sequential(layers[3], layers[-2]), init)
#apply_init(nn.Sequential(layers[2]), init)
if y_range is not None: layers.append(SigmoidRange(*y_range))
super().__init__(*layers)
def __del__(self):
if hasattr(self, "sfs"): self.sfs.remove()
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/models/unet.py
|
unet.py
|
from .xresnet import *
from .unet import *
from .tvm import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/models/all.py
|
all.py
|
from torchvision.models import ResNet,resnet18,resnet34,resnet50,resnet101,resnet152
from torchvision.models import SqueezeNet,squeezenet1_0,squeezenet1_1
from torchvision.models import densenet121,densenet169,densenet201,densenet161
from torchvision.models import vgg11_bn,vgg13_bn,vgg16_bn,vgg19_bn,alexnet
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/models/tvm.py
|
tvm.py
|
from . import xresnet
from . import unet
from .tvm import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/models/__init__.py
|
__init__.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/11_vision.models.xresnet.ipynb (unless otherwise specified).
__all__ = ['init_cnn', 'XResNet', 'xresnet18', 'xresnet34', 'xresnet50', 'xresnet101', 'xresnet152', 'xresnet18_deep',
'xresnet34_deep', 'xresnet50_deep', 'xresnet18_deeper', 'xresnet34_deeper', 'xresnet50_deeper', 'se_kwargs1',
'se_kwargs2', 'se_kwargs3', 'g0', 'g1', 'g2', 'g3', 'xse_resnet18', 'xse_resnext18', 'xresnext18',
'xse_resnet34', 'xse_resnext34', 'xresnext34', 'xse_resnet50', 'xse_resnext50', 'xresnext50',
'xse_resnet101', 'xse_resnext101', 'xresnext101', 'xse_resnet152', 'xsenet154', 'xse_resnext18_deep',
'xse_resnext34_deep', 'xse_resnext50_deep', 'xse_resnext18_deeper', 'xse_resnext34_deeper',
'xse_resnext50_deeper']
# Cell
from ...torch_basics import *
from torchvision.models.utils import load_state_dict_from_url
# Cell
def init_cnn(m):
if getattr(m, 'bias', None) is not None: nn.init.constant_(m.bias, 0)
if isinstance(m, (nn.Conv1d,nn.Conv2d,nn.Conv3d,nn.Linear)): nn.init.kaiming_normal_(m.weight)
for l in m.children(): init_cnn(l)
# Cell
class XResNet(nn.Sequential):
@delegates(ResBlock)
def __init__(self, block, expansion, layers, p=0.0, c_in=3, n_out=1000, stem_szs=(32,32,64),
widen=1.0, sa=False, act_cls=defaults.activation, ndim=2, ks=3, stride=2, **kwargs):
store_attr('block,expansion,act_cls,ndim,ks')
if ks % 2 == 0: raise Exception('kernel size has to be odd!')
stem_szs = [c_in, *stem_szs]
stem = [ConvLayer(stem_szs[i], stem_szs[i+1], ks=ks, stride=stride if i==0 else 1,
act_cls=act_cls, ndim=ndim)
for i in range(3)]
block_szs = [int(o*widen) for o in [64,128,256,512] +[256]*(len(layers)-4)]
block_szs = [64//expansion] + block_szs
blocks = self._make_blocks(layers, block_szs, sa, stride, **kwargs)
super().__init__(
*stem, MaxPool(ks=ks, stride=stride, padding=ks//2, ndim=ndim),
*blocks,
AdaptiveAvgPool(sz=1, ndim=ndim), Flatten(), nn.Dropout(p),
nn.Linear(block_szs[-1]*expansion, n_out),
)
init_cnn(self)
def _make_blocks(self, layers, block_szs, sa, stride, **kwargs):
return [self._make_layer(ni=block_szs[i], nf=block_szs[i+1], blocks=l,
stride=1 if i==0 else stride, sa=sa and i==len(layers)-4, **kwargs)
for i,l in enumerate(layers)]
def _make_layer(self, ni, nf, blocks, stride, sa, **kwargs):
return nn.Sequential(
*[self.block(self.expansion, ni if i==0 else nf, nf, stride=stride if i==0 else 1,
sa=sa and i==(blocks-1), act_cls=self.act_cls, ndim=self.ndim, ks=self.ks, **kwargs)
for i in range(blocks)])
# Cell
def _xresnet(pretrained, expansion, layers, **kwargs):
# TODO pretrain all sizes. Currently will fail with non-xrn50
url = 'https://s3.amazonaws.com/fast-ai-modelzoo/xrn50_940.pth'
res = XResNet(ResBlock, expansion, layers, **kwargs)
if pretrained: res.load_state_dict(load_state_dict_from_url(url, map_location='cpu')['model'], strict=False)
return res
def xresnet18 (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [2, 2, 2, 2], **kwargs)
def xresnet34 (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [3, 4, 6, 3], **kwargs)
def xresnet50 (pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3, 4, 6, 3], **kwargs)
def xresnet101(pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3, 4, 23, 3], **kwargs)
def xresnet152(pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3, 8, 36, 3], **kwargs)
def xresnet18_deep (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [2,2,2,2,1,1], **kwargs)
def xresnet34_deep (pretrained=False, **kwargs): return _xresnet(pretrained, 1, [3,4,6,3,1,1], **kwargs)
def xresnet50_deep (pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3,4,6,3,1,1], **kwargs)
def xresnet18_deeper(pretrained=False, **kwargs): return _xresnet(pretrained, 1, [2,2,1,1,1,1,1,1], **kwargs)
def xresnet34_deeper(pretrained=False, **kwargs): return _xresnet(pretrained, 1, [3,4,6,3,1,1,1,1], **kwargs)
def xresnet50_deeper(pretrained=False, **kwargs): return _xresnet(pretrained, 4, [3,4,6,3,1,1,1,1], **kwargs)
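# Example (minimal sketch): an untrained xresnet50 with a 10-class head.
#   model = xresnet50(n_out=10)
#   model(torch.randn(2, 3, 128, 128)).shape   # -> (2, 10)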
# Cell
se_kwargs1 = dict(groups=1 , reduction=16)
se_kwargs2 = dict(groups=32, reduction=16)
se_kwargs3 = dict(groups=32, reduction=0)
g0 = [2,2,2,2]
g1 = [3,4,6,3]
g2 = [3,4,23,3]
g3 = [3,8,36,3]
# Cell
def xse_resnet18(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 1, g0, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext18(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g0, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext18(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g0, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet34(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 1, g1, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext34(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g1, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext34(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g1, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet50(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 4, g1, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext50(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g1, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext50(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g1, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet101(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 4, g2, n_out=n_out, **se_kwargs1, **kwargs)
def xse_resnext101(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g2, n_out=n_out, **se_kwargs2, **kwargs)
def xresnext101(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g2, n_out=n_out, **se_kwargs3, **kwargs)
def xse_resnet152(n_out=1000, pretrained=False, **kwargs): return XResNet(SEBlock, 4, g3, n_out=n_out, **se_kwargs1, **kwargs)
def xsenet154(n_out=1000, pretrained=False, **kwargs):
    return XResNet(SEBlock, 4, g3, groups=64, reduction=16, p=0.2, n_out=n_out)
def xse_resnext18_deep (n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g0+[1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext34_deep (n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, g1+[1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext50_deep (n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, g1+[1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext18_deeper(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, [2,2,1,1,1,1,1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext34_deeper(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 1, [3,4,4,2,2,1,1,1], n_out=n_out, **se_kwargs2, **kwargs)
def xse_resnext50_deeper(n_out=1000, pretrained=False, **kwargs): return XResNet(SEResNeXtBlock, 4, [3,4,4,2,2,1,1,1], n_out=n_out, **se_kwargs2, **kwargs)
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/vision/models/xresnet.py
|
xresnet.py
|
from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from .transforms import *
from .block import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/all.py
|
all.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04_data.external.ipynb (unless otherwise specified).
__all__ = ['Config', 'URLs', 'download_url', 'download_data', 'file_extract', 'newest_folder', 'rename_extracted',
'untar_data']
# Cell
from ..torch_basics import *
# Cell
class Config:
"Setup config at `~/.fastai` unless it exists already."
config_path = Path(os.getenv('FASTAI_HOME', '~/.fastai')).expanduser()
config_file = config_path/'config.yml'
def __init__(self):
self.config_path.mkdir(parents=True, exist_ok=True)
if not self.config_file.exists(): self.create_config()
self.d = self.load_config()
def __getitem__(self,k):
k = k.lower()
if k not in self.d: k = k+'_path'
return Path(self.d[k])
def __getattr__(self,k):
if k=='d': raise AttributeError
return self[k]
def __setitem__(self,k,v): self.d[k] = str(v)
def __contains__(self,k): return k in self.d
def load_config(self):
"load and return config if version equals 2 in existing, else create new config."
with open(self.config_file, 'r') as f:
config = yaml.safe_load(f)
if 'version' in config and config['version'] == 2: return config
elif 'version' in config: self.create_config(config)
else: self.create_config()
return self.load_config()
def create_config(self, cfg=None):
"create new config with default paths and set `version` to 2."
config = {'data_path': str(self.config_path/'data'),
'archive_path': str(self.config_path/'archive'),
'storage_path': '/tmp',
'model_path': str(self.config_path/'models'),
'version': 2}
if cfg is not None:
cfg['version'] = 2
config = merge(config, cfg)
self.save_file(config)
def save(self): self.save_file(self.d)
def save_file(self, config):
"save config file at default config location `~/.fastai/config.yml`."
with self.config_file.open('w') as f: yaml.dump(config, f, default_flow_style=False)
# Cell
class URLs():
"Global constants for dataset and model URLs."
LOCAL_PATH = Path.cwd()
MDL = 'http://files.fast.ai/models/'
S3 = 'https://s3.amazonaws.com/fast-ai-'
URL = f'{S3}sample/'
S3_IMAGE = f'{S3}imageclas/'
S3_IMAGELOC = f'{S3}imagelocal/'
S3_AUDI = f'{S3}audio/'
S3_NLP = f'{S3}nlp/'
S3_COCO = f'{S3}coco/'
S3_MODEL = f'{S3}modelzoo/'
# main datasets
ADULT_SAMPLE = f'{URL}adult_sample.tgz'
BIWI_SAMPLE = f'{URL}biwi_sample.tgz'
CIFAR = f'{URL}cifar10.tgz'
COCO_SAMPLE = f'{S3_COCO}coco_sample.tgz'
COCO_TINY = f'{S3_COCO}coco_tiny.tgz'
HUMAN_NUMBERS = f'{URL}human_numbers.tgz'
IMDB = f'{S3_NLP}imdb.tgz'
IMDB_SAMPLE = f'{URL}imdb_sample.tgz'
ML_SAMPLE = f'{URL}movie_lens_sample.tgz'
ML_100k = 'http://files.grouplens.org/datasets/movielens/ml-100k.zip'
MNIST_SAMPLE = f'{URL}mnist_sample.tgz'
MNIST_TINY = f'{URL}mnist_tiny.tgz'
MNIST_VAR_SIZE_TINY = f'{S3_IMAGE}mnist_var_size_tiny.tgz'
PLANET_SAMPLE = f'{URL}planet_sample.tgz'
PLANET_TINY = f'{URL}planet_tiny.tgz'
IMAGENETTE = f'{S3_IMAGE}imagenette2.tgz'
IMAGENETTE_160 = f'{S3_IMAGE}imagenette2-160.tgz'
IMAGENETTE_320 = f'{S3_IMAGE}imagenette2-320.tgz'
IMAGEWOOF = f'{S3_IMAGE}imagewoof2.tgz'
IMAGEWOOF_160 = f'{S3_IMAGE}imagewoof2-160.tgz'
IMAGEWOOF_320 = f'{S3_IMAGE}imagewoof2-320.tgz'
IMAGEWANG = f'{S3_IMAGE}imagewang.tgz'
IMAGEWANG_160 = f'{S3_IMAGE}imagewang-160.tgz'
IMAGEWANG_320 = f'{S3_IMAGE}imagewang-320.tgz'
# kaggle competitions download dogs-vs-cats -p {DOGS.absolute()}
DOGS = f'{URL}dogscats.tgz'
# image classification datasets
CALTECH_101 = f'{S3_IMAGE}caltech_101.tgz'
CARS = f'{S3_IMAGE}stanford-cars.tgz'
CIFAR_100 = f'{S3_IMAGE}cifar100.tgz'
CUB_200_2011 = f'{S3_IMAGE}CUB_200_2011.tgz'
FLOWERS = f'{S3_IMAGE}oxford-102-flowers.tgz'
FOOD = f'{S3_IMAGE}food-101.tgz'
MNIST = f'{S3_IMAGE}mnist_png.tgz'
PETS = f'{S3_IMAGE}oxford-iiit-pet.tgz'
# NLP datasets
AG_NEWS = f'{S3_NLP}ag_news_csv.tgz'
AMAZON_REVIEWS = f'{S3_NLP}amazon_review_full_csv.tgz'
AMAZON_REVIEWS_POLARITY = f'{S3_NLP}amazon_review_polarity_csv.tgz'
DBPEDIA = f'{S3_NLP}dbpedia_csv.tgz'
MT_ENG_FRA = f'{S3_NLP}giga-fren.tgz'
SOGOU_NEWS = f'{S3_NLP}sogou_news_csv.tgz'
WIKITEXT = f'{S3_NLP}wikitext-103.tgz'
WIKITEXT_TINY = f'{S3_NLP}wikitext-2.tgz'
YAHOO_ANSWERS = f'{S3_NLP}yahoo_answers_csv.tgz'
YELP_REVIEWS = f'{S3_NLP}yelp_review_full_csv.tgz'
YELP_REVIEWS_POLARITY = f'{S3_NLP}yelp_review_polarity_csv.tgz'
# Image localization datasets
BIWI_HEAD_POSE = f"{S3_IMAGELOC}biwi_head_pose.tgz"
CAMVID = f'{S3_IMAGELOC}camvid.tgz'
CAMVID_TINY = f'{URL}camvid_tiny.tgz'
LSUN_BEDROOMS = f'{S3_IMAGE}bedroom.tgz'
PASCAL_2007 = f'{S3_IMAGELOC}pascal_2007.tgz'
PASCAL_2012 = f'{S3_IMAGELOC}pascal_2012.tgz'
# Audio classification datasets
MACAQUES = 'https://storage.googleapis.com/ml-animal-sounds-datasets/macaques.zip'
ZEBRA_FINCH = 'https://storage.googleapis.com/ml-animal-sounds-datasets/zebra_finch.zip'
# Medical Imaging datasets
#SKIN_LESION = f'{S3_IMAGELOC}skin_lesion.tgz'
SIIM_SMALL = f'{S3_IMAGELOC}siim_small.tgz'
TCGA_SMALL = f'{S3_IMAGELOC}tcga_small.tgz'
#Pretrained models
OPENAI_TRANSFORMER = f'{S3_MODEL}transformer.tgz'
WT103_FWD = f'{S3_MODEL}wt103-fwd.tgz'
WT103_BWD = f'{S3_MODEL}wt103-bwd.tgz'
def path(url='.', c_key='archive'):
"Return local path where to download based on `c_key`"
fname = url.split('/')[-1]
local_path = URLs.LOCAL_PATH/('models' if c_key=='models' else 'data')/fname
if local_path.exists(): return local_path
return Config()[c_key]/fname
# Cell
def download_url(url, dest, overwrite=False, pbar=None, show_progress=True, chunk_size=1024*1024,
timeout=4, retries=5):
"Download `url` to `dest` unless it exists and not `overwrite`"
if os.path.exists(dest) and not overwrite: return
s = requests.Session()
s.mount('http://',requests.adapters.HTTPAdapter(max_retries=retries))
# additional line to identify as a firefox browser, see fastai/#2438
s.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0'})
u = s.get(url, stream=True, timeout=timeout)
try: file_size = int(u.headers["Content-Length"])
except: show_progress = False
with open(dest, 'wb') as f:
nbytes = 0
if show_progress: pbar = progress_bar(range(file_size), leave=False, parent=pbar)
try:
if show_progress: pbar.update(0)
for chunk in u.iter_content(chunk_size=chunk_size):
nbytes += len(chunk)
if show_progress: pbar.update(nbytes)
f.write(chunk)
except requests.exceptions.ConnectionError as e:
fname = url.split('/')[-1]
data_dir = dest.parent
print(f'\n Download of {url} has failed after {retries} retries\n'
f' Fix the download manually:\n'
f'$ mkdir -p {data_dir}\n'
f'$ cd {data_dir}\n'
f'$ wget -c {url}\n'
f'$ tar xf {fname}\n'
f' And re-run your code once the download is successful\n')
# Cell
def download_data(url, fname=None, c_key='archive', force_download=False, timeout=4):
"Download `url` to `fname`."
fname = Path(fname or URLs.path(url, c_key=c_key))
fname.parent.mkdir(parents=True, exist_ok=True)
if not fname.exists() or force_download: download_url(url, fname, overwrite=force_download, timeout=timeout)
return fname
# Cell
def _get_check(url):
"internal function to get the hash of the file at `url`."
checks = json.load(open(Path(__file__).parent/'checks.txt', 'r'))
return checks.get(url, '')
def _check_file(fname):
"internal function to get the hash of the local file at `fname`."
size = os.path.getsize(fname)
with open(fname, "rb") as f: hash_nb = hashlib.md5(f.read(2**20)).hexdigest()
return [size,hash_nb]
# Cell
def _add_check(url, fname):
"Internal function to update the internal check file with `url` and check on `fname`."
checks = json.load(open(Path(__file__).parent/'checks.txt', 'r'))
checks[url] = _check_file(fname)
json.dump(checks, open(Path(__file__).parent/'checks.txt', 'w'), indent=2)
# Cell
def file_extract(fname, dest=None):
"Extract `fname` to `dest` using `tarfile` or `zipfile`."
if dest is None: dest = Path(fname).parent
fname = str(fname)
if fname.endswith('gz'): tarfile.open(fname, 'r:gz').extractall(dest)
elif fname.endswith('zip'): zipfile.ZipFile(fname ).extractall(dest)
else: raise Exception(f'Unrecognized archive: {fname}')
# Cell
def _try_from_storage(dest, storage):
"an internal function to create symbolic links for files from `storage` to `dest` if `storage` exists"
if not storage.exists(): return
os.makedirs(dest, exist_ok=True)
for f in storage.glob('*'): os.symlink(f, dest/f.name, target_is_directory=f.is_dir())
# Cell
def newest_folder(path):
"Return newest folder on path"
list_of_paths = path.glob('*')
return max(list_of_paths, key=lambda p: p.stat().st_ctime)
# Cell
def rename_extracted(dest):
"Rename file if different from dest"
extracted = newest_folder(dest.parent)
if not (extracted.name == dest.name): extracted.rename(dest)
# Cell
def untar_data(url, fname=None, dest=None, c_key='data', force_download=False, extract_func=file_extract, timeout=4):
"Download `url` to `fname` if `dest` doesn't exist, and un-tgz or unzip to folder `dest`."
default_dest = URLs.path(url, c_key=c_key).with_suffix('')
dest = default_dest if dest is None else Path(dest)/default_dest.name
fname = Path(fname or URLs.path(url))
if fname.exists() and _get_check(url) and _check_file(fname) != _get_check(url):
print("A new version of this dataset is available, downloading...")
force_download = True
if force_download:
if fname.exists(): os.remove(fname)
if dest.exists(): shutil.rmtree(dest)
if not dest.exists(): _try_from_storage(dest, URLs.path(url, c_key='storage').with_suffix(''))
if not dest.exists():
fname = download_data(url, fname=fname, c_key=c_key, timeout=timeout)
if _get_check(url) and _check_file(fname) != _get_check(url):
print(f"File downloaded is broken. Remove {fname} and try again.")
extract_func(fname, dest.parent)
rename_extracted(dest)
return dest
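# Example (minimal sketch): download and extract a small dataset under the configured data path.
#   path = untar_data(URLs.MNIST_TINY)
#   path.ls()   # contents of the extracted folder (train/, valid/, ...)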
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/external.py
|
external.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_data.core.ipynb (unless otherwise specified).
__all__ = ['show_batch', 'show_results', 'TfmdDL', 'DataLoaders', 'FilteredBase', 'TfmdLists', 'decode_at', 'show_at',
'Datasets', 'test_set']
# Cell
from ..torch_basics import *
from .load import *
# Cell
@typedispatch
def show_batch(x, y, samples, ctxs=None, max_n=9, **kwargs):
if ctxs is None: ctxs = Inf.nones
if hasattr(samples[0], 'show'):
ctxs = [s.show(ctx=c, **kwargs) for s,c,_ in zip(samples,ctxs,range(max_n))]
else:
for i in range_of(samples[0]):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
return ctxs
# Cell
@typedispatch
def show_results(x, y, samples, outs, ctxs=None, max_n=9, **kwargs):
if ctxs is None: ctxs = Inf.nones
for i in range(len(samples[0])):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
for i in range(len(outs[0])):
ctxs = [b.show(ctx=c, **kwargs) for b,c,_ in zip(outs.itemgot(i),ctxs,range(max_n))]
return ctxs
# Cell
#nbdev_comment _all_ = ["show_batch", "show_results"]
# Cell
_batch_tfms = ('after_item','before_batch','after_batch')
# Cell
@delegates()
class TfmdDL(DataLoader):
"Transformed `DataLoader`"
def __init__(self, dataset, bs=64, shuffle=False, num_workers=None, verbose=False, do_setup=True, **kwargs):
if num_workers is None: num_workers = min(16, defaults.cpus)
for nm in _batch_tfms: kwargs[nm] = Pipeline(kwargs.get(nm,None))
super().__init__(dataset, bs=bs, shuffle=shuffle, num_workers=num_workers, **kwargs)
if do_setup:
for nm in _batch_tfms:
pv(f"Setting up {nm}: {kwargs[nm]}", verbose)
kwargs[nm].setup(self)
def _one_pass(self):
b = self.do_batch([self.do_item(0)])
if self.device is not None: b = to_device(b, self.device)
its = self.after_batch(b)
self._n_inp = 1 if not isinstance(its, (list,tuple)) or len(its)==1 else len(its)-1
self._types = explode_types(its)
def _retain_dl(self,b):
if not getattr(self, '_types', None): self._one_pass()
return retain_types(b, typs=self._types)
@delegates(DataLoader.new)
def new(self, dataset=None, cls=None, **kwargs):
res = super().new(dataset, cls, do_setup=False, **kwargs)
if not hasattr(self, '_n_inp') or not hasattr(self, '_types'):
try:
self._one_pass()
res._n_inp,res._types = self._n_inp,self._types
except: print("Could not do one pass in your dataloader, there is something wrong in it")
else: res._n_inp,res._types = self._n_inp,self._types
return res
def before_iter(self):
super().before_iter()
split_idx = getattr(self.dataset, 'split_idx', None)
for nm in _batch_tfms:
f = getattr(self,nm)
if isinstance(f,Pipeline): f.split_idx=split_idx
def decode(self, b): return to_cpu(self.after_batch.decode(self._retain_dl(b)))
def decode_batch(self, b, max_n=9, full=True): return self._decode_batch(self.decode(b), max_n, full)
def _decode_batch(self, b, max_n=9, full=True):
f = self.after_item.decode
f1 = self.before_batch.decode
f = compose(f1, f, partial(getattr(self.dataset,'decode',noop), full = full))
return L(batch_to_samples(b, max_n=max_n)).map(f)
def _pre_show_batch(self, b, max_n=9):
"Decode `b` to be ready for `show_batch`"
b = self.decode(b)
if hasattr(b, 'show'): return b,None,None
its = self._decode_batch(b, max_n, full=False)
if not is_listy(b): b,its = [b],L((o,) for o in its)
return detuplify(b[:self.n_inp]),detuplify(b[self.n_inp:]),its
def show_batch(self, b=None, max_n=9, ctxs=None, show=True, unique=False, **kwargs):
if unique:
old_get_idxs = self.get_idxs
self.get_idxs = lambda: Inf.zeros
if b is None: b = self.one_batch()
if not show: return self._pre_show_batch(b, max_n=max_n)
show_batch(*self._pre_show_batch(b, max_n=max_n), ctxs=ctxs, max_n=max_n, **kwargs)
if unique: self.get_idxs = old_get_idxs
def show_results(self, b, out, max_n=9, ctxs=None, show=True, **kwargs):
x,y,its = self.show_batch(b, max_n=max_n, show=False)
b_out = type(b)(b[:self.n_inp] + (tuple(out) if is_listy(out) else (out,)))
x1,y1,outs = self.show_batch(b_out, max_n=max_n, show=False)
res = (x,x1,None,None) if its is None else (x, y, its, outs.itemgot(slice(self.n_inp,None)))
if not show: return res
show_results(*res, ctxs=ctxs, max_n=max_n, **kwargs)
@property
def n_inp(self):
if hasattr(self.dataset, 'n_inp'): return self.dataset.n_inp
if not hasattr(self, '_n_inp'): self._one_pass()
return self._n_inp
def to(self, device):
self.device = device
for tfm in self.after_batch.fs:
for a in L(getattr(tfm, 'parameters', None)): setattr(tfm, a, getattr(tfm, a).to(device))
return self
# Cell
add_docs(TfmdDL,
decode="Decode `b` using `tfms`",
decode_batch="Decode `b` entirely",
new="Create a new version of self with a few changed attributes",
show_batch="Show `b` (defaults to `one_batch`), a list of lists of pipeline outputs (i.e. output of a `DataLoader`)",
show_results="Show each item of `b` and `out`",
before_iter="override",
to="Put self and its transforms state on `device`")
# Cell
@docs
class DataLoaders(GetAttr):
"Basic wrapper around several `DataLoader`s."
_default='train'
def __init__(self, *loaders, path='.', device=None):
self.loaders,self.path = list(loaders),Path(path)
if device is not None or hasattr(loaders[0],'to'): self.device = device
def __getitem__(self, i): return self.loaders[i]
def new_empty(self):
loaders = [dl.new(dl.dataset.new_empty()) for dl in self.loaders]
return type(self)(*loaders, path=self.path, device=self.device)
def _set(i, self, v): self.loaders[i] = v
train ,valid = add_props(lambda i,x: x[i], _set)
train_ds,valid_ds = add_props(lambda i,x: x[i].dataset)
@property
def device(self): return self._device
@device.setter
def device(self, d):
for dl in self.loaders: dl.to(d)
self._device = d
def to(self, device):
self.device = device
return self
def cuda(self): return self.to(device=default_device())
def cpu(self): return self.to(device=torch.device('cpu'))
@classmethod
def from_dsets(cls, *ds, path='.', bs=64, device=None, dl_type=TfmdDL, **kwargs):
default = (True,) + (False,) * (len(ds)-1)
defaults = {'shuffle': default, 'drop_last': default}
for nm in _batch_tfms:
if nm in kwargs: kwargs[nm] = Pipeline(kwargs[nm])
kwargs = merge(defaults, {k: tuplify(v, match=ds) for k,v in kwargs.items()})
kwargs = [{k: v[i] for k,v in kwargs.items()} for i in range_of(ds)]
return cls(*[dl_type(d, bs=bs, **k) for d,k in zip(ds, kwargs)], path=path, device=device)
@classmethod
def from_dblock(cls, dblock, source, path='.', bs=64, val_bs=None, shuffle_train=True, device=None, **kwargs):
return dblock.dataloaders(source, path=path, bs=bs, val_bs=val_bs, shuffle_train=shuffle_train, device=device, **kwargs)
_docs=dict(__getitem__="Retrieve `DataLoader` at `i` (`0` is training, `1` is validation)",
train="Training `DataLoader`",
valid="Validation `DataLoader`",
train_ds="Training `Dataset`",
valid_ds="Validation `Dataset`",
to="Use `device`",
cuda="Use the gpu if available",
cpu="Use the cpu",
new_empty="Create a new empty version of `self` with the same transforms",
from_dblock="Create a dataloaders from a given `dblock`")
# Cell
class FilteredBase:
"Base class for lists with subsets"
_dl_type,_dbunch_type = TfmdDL,DataLoaders
def __init__(self, *args, dl_type=None, **kwargs):
if dl_type is not None: self._dl_type = dl_type
self.dataloaders = delegates(self._dl_type.__init__)(self.dataloaders)
super().__init__(*args, **kwargs)
@property
def n_subsets(self): return len(self.splits)
def _new(self, items, **kwargs): return super()._new(items, splits=self.splits, **kwargs)
    def subset(self, i): raise NotImplementedError
def dataloaders(self, bs=64, val_bs=None, shuffle_train=True, n=None, path='.', dl_type=None, dl_kwargs=None,
device=None, **kwargs):
if device is None: device=default_device()
if dl_kwargs is None: dl_kwargs = [{}] * self.n_subsets
if dl_type is None: dl_type = self._dl_type
drop_last = kwargs.pop('drop_last', shuffle_train)
dl = dl_type(self.subset(0), bs=bs, shuffle=shuffle_train, drop_last=drop_last, n=n, device=device,
**merge(kwargs, dl_kwargs[0]))
dls = [dl] + [dl.new(self.subset(i), bs=(bs if val_bs is None else val_bs), shuffle=False, drop_last=False,
n=None, **dl_kwargs[i]) for i in range(1, self.n_subsets)]
return self._dbunch_type(*dls, path=path, device=device)
FilteredBase.train,FilteredBase.valid = add_props(lambda i,x: x.subset(i))
# Cell
class TfmdLists(FilteredBase, L, GetAttr):
"A `Pipeline` of `tfms` applied to a collection of `items`"
_default='tfms'
def __init__(self, items, tfms, use_list=None, do_setup=True, split_idx=None, train_setup=True,
splits=None, types=None, verbose=False, dl_type=None):
super().__init__(items, use_list=use_list)
if dl_type is not None: self._dl_type = dl_type
self.splits = L([slice(None),[]] if splits is None else splits).map(mask2idxs)
if isinstance(tfms,TfmdLists): tfms = tfms.tfms
if isinstance(tfms,Pipeline): do_setup=False
self.tfms = Pipeline(tfms, split_idx=split_idx)
store_attr('types,split_idx')
if do_setup:
pv(f"Setting up {self.tfms}", verbose)
self.setup(train_setup=train_setup)
def _new(self, items, split_idx=None, **kwargs):
split_idx = ifnone(split_idx,self.split_idx)
return super()._new(items, tfms=self.tfms, do_setup=False, types=self.types, split_idx=split_idx, **kwargs)
def subset(self, i): return self._new(self._get(self.splits[i]), split_idx=i)
def _after_item(self, o): return self.tfms(o)
def __repr__(self): return f"{self.__class__.__name__}: {self.items}\ntfms - {self.tfms.fs}"
def __iter__(self): return (self[i] for i in range(len(self)))
def show(self, o, **kwargs): return self.tfms.show(o, **kwargs)
def decode(self, o, **kwargs): return self.tfms.decode(o, **kwargs)
def __call__(self, o, **kwargs): return self.tfms.__call__(o, **kwargs)
def overlapping_splits(self): return L(Counter(self.splits.concat()).values()).filter(gt(1))
def new_empty(self): return self._new([])
def setup(self, train_setup=True):
self.tfms.setup(self, train_setup)
if len(self) != 0:
x = super().__getitem__(0) if self.splits is None else super().__getitem__(self.splits[0])[0]
self.types = []
for f in self.tfms.fs:
self.types.append(getattr(f, 'input_types', type(x)))
x = f(x)
self.types.append(type(x))
types = L(t if is_listy(t) else [t] for t in self.types).concat().unique()
self.pretty_types = '\n'.join([f' - {t}' for t in types])
def infer_idx(self, x):
# TODO: check if we really need this, or can simplify
idx = 0
for t in self.types:
if isinstance(x, t): break
idx += 1
types = L(t if is_listy(t) else [t] for t in self.types).concat().unique()
pretty_types = '\n'.join([f' - {t}' for t in types])
assert idx < len(self.types), f"Expected an input of type in \n{pretty_types}\n but got {type(x)}"
return idx
def infer(self, x):
return compose_tfms(x, tfms=self.tfms.fs[self.infer_idx(x):], split_idx=self.split_idx)
def __getitem__(self, idx):
res = super().__getitem__(idx)
if self._after_item is None: return res
return self._after_item(res) if is_indexer(idx) else res.map(self._after_item)
# Cell
add_docs(TfmdLists,
setup="Transform setup with self",
decode="From `Pipeline`",
show="From `Pipeline`",
overlapping_splits="All splits that are in more than one split",
subset="New `TfmdLists` with same tfms that only includes items in `i`th split",
infer_idx="Finds the index where `self.tfms` can be applied to `x`, depending on the type of `x`",
infer="Apply `self.tfms` to `x` starting at the right tfm depending on the type of `x`",
new_empty="A new version of `self` but with no items")
# Cell
def decode_at(o, idx):
"Decoded item at `idx`"
return o.decode(o[idx])
# Cell
def show_at(o, idx, **kwargs):
"Show item at `idx`",
return o.show(o[idx], **kwargs)
# Cell
@docs
@delegates(TfmdLists)
class Datasets(FilteredBase):
"A dataset that creates a tuple from each `tfms`, passed through `item_tfms`"
def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
super().__init__(dl_type=dl_type)
self.tls = L(tls if tls else [TfmdLists(items, t, **kwargs) for t in L(ifnone(tfms,[None]))])
self.n_inp = ifnone(n_inp, max(1, len(self.tls)-1))
def __getitem__(self, it):
res = tuple([tl[it] for tl in self.tls])
return res if is_indexer(it) else list(zip(*res))
def __getattr__(self,k): return gather_attrs(self, k, 'tls')
def __dir__(self): return super().__dir__() + gather_attr_names(self, 'tls')
def __len__(self): return len(self.tls[0])
def __iter__(self): return (self[i] for i in range(len(self)))
def __repr__(self): return coll_repr(self)
def decode(self, o, full=True): return tuple(tl.decode(o_, full=full) for o_,tl in zip(o,tuplify(self.tls, match=o)))
def subset(self, i): return type(self)(tls=L(tl.subset(i) for tl in self.tls), n_inp=self.n_inp)
def _new(self, items, *args, **kwargs): return super()._new(items, tfms=self.tfms, do_setup=False, **kwargs)
def overlapping_splits(self): return self.tls[0].overlapping_splits()
def new_empty(self): return type(self)(tls=[tl.new_empty() for tl in self.tls], n_inp=self.n_inp)
@property
def splits(self): return self.tls[0].splits
@property
def split_idx(self): return self.tls[0].tfms.split_idx
@property
def items(self): return self.tls[0].items
@items.setter
def items(self, v):
for tl in self.tls: tl.items = v
def show(self, o, ctx=None, **kwargs):
for o_,tl in zip(o,self.tls): ctx = tl.show(o_, ctx=ctx, **kwargs)
return ctx
@contextmanager
def set_split_idx(self, i):
old_split_idx = self.split_idx
for tl in self.tls: tl.tfms.split_idx = i
try: yield self
finally:
for tl in self.tls: tl.tfms.split_idx = old_split_idx
_docs=dict(
decode="Compose `decode` of all `tuple_tfms` then all `tfms` on `i`",
show="Show item `o` in `ctx`",
dataloaders="Get a `DataLoaders`",
overlapping_splits="All splits that are in more than one split",
subset="New `Datasets` that only includes subset `i`",
new_empty="Create a new empty version of the `self`, keeping only the transforms",
set_split_idx="Contextmanager to use the same `Datasets` with another `split_idx`"
)
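# Example (minimal sketch; `get_image_files`, `PILImage`, `parent_label`, `Resize` etc. come
# from other modules of the library, `path` is an image folder laid out by class):
#   items = get_image_files(path)
#   splits = RandomSplitter()(items)
#   dsets = Datasets(items, tfms=[[PILImage.create], [parent_label, Categorize]], splits=splits)
#   x, y = dsets.train[0]                       # (PILImage, TensorCategory)
#   dls = dsets.dataloaders(bs=64, after_item=[Resize(224), ToTensor])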
# Cell
def test_set(dsets, test_items, rm_tfms=None, with_labels=False):
"Create a test set from `test_items` using validation transforms of `dsets`"
if isinstance(dsets, Datasets):
tls = dsets.tls if with_labels else dsets.tls[:dsets.n_inp]
test_tls = [tl._new(test_items, split_idx=1) for tl in tls]
if rm_tfms is None: rm_tfms = [tl.infer_idx(get_first(test_items)) for tl in test_tls]
else: rm_tfms = tuplify(rm_tfms, match=test_tls)
for i,j in enumerate(rm_tfms): test_tls[i].tfms.fs = test_tls[i].tfms.fs[j:]
return Datasets(tls=test_tls)
elif isinstance(dsets, TfmdLists):
test_tl = dsets._new(test_items, split_idx=1)
if rm_tfms is None: rm_tfms = dsets.infer_idx(get_first(test_items))
test_tl.tfms.fs = test_tl.tfms.fs[rm_tfms:]
return test_tl
else: raise Exception(f"This method requires using the fastai library to assemble your data. Expected a `Datasets` or a `TfmdLists` but got {dsets.__class__.__name__}")
# Cell
@patch
@delegates(TfmdDL.__init__)
def test_dl(self:DataLoaders, test_items, rm_type_tfms=None, with_labels=False, **kwargs):
"Create a test dataloader from `test_items` using validation transforms of `dls`"
test_ds = test_set(self.valid_ds, test_items, rm_tfms=rm_type_tfms, with_labels=with_labels
) if isinstance(self.valid_ds, (Datasets, TfmdLists)) else test_items
return self.valid.new(test_ds, **kwargs)
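# A minimal sketch of the intended use (assuming `learn` is a fitted `Learner` whose `dls`
# were built with fastai and `test_items` matches the training inputs):
#   dl = learn.dls.test_dl(test_items)                     # validation tfms, no labels
#   preds,_ = learn.get_preds(dl=dl)
#   dl = learn.dls.test_dl(test_items, with_labels=True)   # keep targets when available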
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/core.py
|
core.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05_data.transforms.ipynb (unless otherwise specified).
__all__ = ['get_files', 'FileGetter', 'image_extensions', 'get_image_files', 'ImageGetter', 'get_text_files',
'ItemGetter', 'AttrGetter', 'RandomSplitter', 'TrainTestSplitter', 'IndexSplitter', 'GrandparentSplitter',
'FuncSplitter', 'MaskSplitter', 'FileSplitter', 'ColSplitter', 'RandomSubsetSplitter', 'parent_label',
'RegexLabeller', 'ColReader', 'CategoryMap', 'Categorize', 'Category', 'MultiCategorize', 'MultiCategory',
'OneHotEncode', 'EncodedMultiCategorize', 'RegressionSetup', 'get_c', 'ToTensor', 'IntToFloatTensor',
'broadcast_vec', 'Normalize']
# Cell
from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from sklearn.model_selection import train_test_split
# Cell
def _get_files(p, fs, extensions=None):
p = Path(p)
res = [p/f for f in fs if not f.startswith('.')
and ((not extensions) or f'.{f.split(".")[-1].lower()}' in extensions)]
return res
# Cell
def get_files(path, extensions=None, recurse=True, folders=None, followlinks=True):
"Get all the files in `path` with optional `extensions`, optionally with `recurse`, only in `folders`, if specified."
path = Path(path)
folders=L(folders)
extensions = setify(extensions)
extensions = {e.lower() for e in extensions}
if recurse:
res = []
for i,(p,d,f) in enumerate(os.walk(path, followlinks=followlinks)): # returns (dirpath, dirnames, filenames)
if len(folders) !=0 and i==0: d[:] = [o for o in d if o in folders]
else: d[:] = [o for o in d if not o.startswith('.')]
if len(folders) !=0 and i==0 and '.' not in folders: continue
res += _get_files(p, f, extensions)
else:
f = [o.name for o in os.scandir(path) if o.is_file()]
res = _get_files(path, f, extensions)
return L(res)
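# Quick illustrations (assuming `path` points at an extracted dataset on disk):
#   get_files(path, extensions='.csv')                    # every .csv anywhere under `path`
#   get_files(path, extensions='.txt', folders=['train']) # restrict the walk to path/train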
# Cell
def FileGetter(suf='', extensions=None, recurse=True, folders=None):
"Create `get_files` partial function that searches path suffix `suf`, only in `folders`, if specified, and passes along args"
def _inner(o, extensions=extensions, recurse=recurse, folders=folders):
return get_files(o/suf, extensions, recurse, folders)
return _inner
# Cell
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
# Cell
def get_image_files(path, recurse=True, folders=None):
"Get image files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=image_extensions, recurse=recurse, folders=folders)
# Cell
def ImageGetter(suf='', recurse=True, folders=None):
"Create `get_image_files` partial that searches suffix `suf` and passes along `kwargs`, only in `folders`, if specified"
def _inner(o, recurse=recurse, folders=folders): return get_image_files(o/suf, recurse, folders)
return _inner
# Cell
def get_text_files(path, recurse=True, folders=None):
"Get text files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=['.txt'], recurse=recurse, folders=folders)
# Cell
class ItemGetter(ItemTransform):
"Creates a proper transform that applies `itemgetter(i)` (even on a tuple)"
_retain = False
def __init__(self, i): self.i = i
def encodes(self, x): return x[self.i]
# Cell
class AttrGetter(ItemTransform):
"Creates a proper transform that applies `attrgetter(nm)` (even on a tuple)"
_retain = False
def __init__(self, nm, default=None): store_attr()
def encodes(self, x): return getattr(x, self.nm, self.default)
# Cell
def RandomSplitter(valid_pct=0.2, seed=None):
"Create function that splits `items` between train/val with `valid_pct` randomly."
def _inner(o):
if seed is not None: torch.manual_seed(seed)
rand_idx = L(list(torch.randperm(len(o)).numpy()))
cut = int(valid_pct * len(o))
return rand_idx[cut:],rand_idx[:cut]
return _inner
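# Worked example: with 10 items and valid_pct=0.2, cut = int(0.2*10) = 2, so the splitter
# returns 8 training and 2 validation indices (pass `seed` for a reproducible permutation):
#   splits = RandomSplitter(valid_pct=0.2, seed=42)(range(10))
#   len(splits[0]), len(splits[1])   # -> (8, 2)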
# Cell
def TrainTestSplitter(test_size=0.2, random_state=None, stratify=None, train_size=None, shuffle=True):
"Split `items` into random train and test subsets using sklearn train_test_split utility."
def _inner(o, **kwargs):
train,valid = train_test_split(range_of(o), test_size=test_size, random_state=random_state,
stratify=stratify, train_size=train_size, shuffle=shuffle)
return L(train), L(valid)
return _inner
# Cell
def IndexSplitter(valid_idx):
"Split `items` so that `val_idx` are in the validation set and the others in the training set"
def _inner(o):
train_idx = np.setdiff1d(np.array(range_of(o)), np.array(valid_idx))
return L(train_idx, use_list=True), L(valid_idx, use_list=True)
return _inner
# Cell
def _grandparent_idxs(items, name):
def _inner(items, name): return mask2idxs(Path(o).parent.parent.name == name for o in items)
return [i for n in L(name) for i in _inner(items,n)]
# Cell
def GrandparentSplitter(train_name='train', valid_name='valid'):
"Split `items` from the grand parent folder names (`train_name` and `valid_name`)."
def _inner(o):
return _grandparent_idxs(o, train_name),_grandparent_idxs(o, valid_name)
return _inner
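# A sketch of the expected layout: an item is assigned by the name of its grandparent folder,
#   path/train/cat/001.jpg  -> training split
#   path/valid/dog/042.jpg  -> validation split
#   splits = GrandparentSplitter()(get_image_files(path))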
# Cell
def FuncSplitter(func):
"Split `items` by result of `func` (`True` for validation, `False` for training set)."
def _inner(o):
val_idx = mask2idxs(func(o_) for o_ in o)
return IndexSplitter(val_idx)(o)
return _inner
# Cell
def MaskSplitter(mask):
"Split `items` depending on the value of `mask`."
def _inner(o): return IndexSplitter(mask2idxs(mask))(o)
return _inner
# Cell
def FileSplitter(fname):
"Split `items` by providing file `fname` (contains names of valid items separated by newline)."
valid = Path(fname).read_text().split('\n')
def _func(x): return x.name in valid
def _inner(o): return FuncSplitter(_func)(o)
return _inner
# Cell
def ColSplitter(col='is_valid'):
"Split `items` (supposed to be a dataframe) by value in `col`"
def _inner(o):
assert isinstance(o, pd.DataFrame), "ColSplitter only works when your items are a pandas DataFrame"
valid_idx = (o.iloc[:,col] if isinstance(col, int) else o[col]).values.astype('bool')
return IndexSplitter(mask2idxs(valid_idx))(o)
return _inner
# Cell
def RandomSubsetSplitter(train_sz, valid_sz, seed=None):
"Take randoms subsets of `splits` with `train_sz` and `valid_sz`"
assert 0 < train_sz < 1
assert 0 < valid_sz < 1
assert train_sz + valid_sz <= 1.
def _inner(o):
if seed is not None: torch.manual_seed(seed)
train_len,valid_len = int(len(o)*train_sz),int(len(o)*valid_sz)
idxs = L(list(torch.randperm(len(o)).numpy()))
return idxs[:train_len],idxs[train_len:train_len+valid_len]
return _inner
# Cell
def parent_label(o):
"Label `item` with the parent folder name."
return Path(o).parent.name
# Cell
class RegexLabeller():
"Label `item` with regex `pat`."
def __init__(self, pat, match=False):
self.pat = re.compile(pat)
self.matcher = self.pat.match if match else self.pat.search
def __call__(self, o):
res = self.matcher(str(o))
assert res,f'Failed to find "{self.pat}" in "{o}"'
return res.group(1)
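# A small example (hypothetical pet-style filename): the first capture group becomes the label.
#   labeller = RegexLabeller(r'^(.+)_\d+.jpg$')
#   labeller('great_pyrenees_173.jpg')   # -> 'great_pyrenees'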
# Cell
class ColReader(DisplayedTransform):
"Read `cols` in `row` with potential `pref` and `suff`"
def __init__(self, cols, pref='', suff='', label_delim=None):
store_attr()
self.pref = str(pref) + os.path.sep if isinstance(pref, Path) else pref
self.cols = L(cols)
def _do_one(self, r, c):
o = r[c] if isinstance(c, int) else r[c] if c=='name' else getattr(r, c)
if len(self.pref)==0 and len(self.suff)==0 and self.label_delim is None: return o
if self.label_delim is None: return f'{self.pref}{o}{self.suff}'
else: return o.split(self.label_delim) if len(o)>0 else []
def __call__(self, o, **kwargs):
if len(self.cols) == 1: return self._do_one(o, self.cols[0])
return L(self._do_one(o, c) for c in self.cols)
# Cell
class CategoryMap(CollBase):
"Collection of categories with the reverse mapping in `o2i`"
def __init__(self, col, sort=True, add_na=False, strict=False):
if is_categorical_dtype(col):
items = L(col.cat.categories, use_list=True)
#Remove non-used categories while keeping order
if strict: items = L(o for o in items if o in col.unique())
else:
if not hasattr(col,'unique'): col = L(col, use_list=True)
# `o==o` is the generalized definition of non-NaN used by Pandas
items = L(o for o in col.unique() if o==o)
if sort: items = items.sorted()
self.items = '#na#' + items if add_na else items
self.o2i = defaultdict(int, self.items.val2idx()) if add_na else dict(self.items.val2idx())
def map_objs(self,objs):
"Map `objs` to IDs"
return L(self.o2i[o] for o in objs)
def map_ids(self,ids):
"Map `ids` to objects in vocab"
return L(self.items[o] for o in ids)
def __eq__(self,b): return all_equal(b,self)
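# A small worked example: values are de-duplicated, sorted, and `o2i` holds the reverse map;
# with `add_na=True` a '#na#' token is prepended and unknown values fall back to index 0.
#   cm = CategoryMap(['dog','cat','dog','fish'])
#   cm.items                      # -> ['cat', 'dog', 'fish'] (an L)
#   cm.o2i['dog']                 # -> 1
#   cm.map_objs(['fish','cat'])   # -> [2, 0] (an L)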
# Cell
class Categorize(DisplayedTransform):
"Reversible transform of category string to `vocab` id"
loss_func,order=CrossEntropyLossFlat(),1
def __init__(self, vocab=None, sort=True, add_na=False):
if vocab is not None: vocab = CategoryMap(vocab, sort=sort, add_na=add_na)
store_attr()
def setups(self, dsets):
if self.vocab is None and dsets is not None: self.vocab = CategoryMap(dsets, sort=self.sort, add_na=self.add_na)
self.c = len(self.vocab)
def encodes(self, o):
try:
return TensorCategory(self.vocab.o2i[o])
except KeyError as e:
raise KeyError(f"Label '{o}' was not included in the training dataset") from e
def decodes(self, o): return Category (self.vocab [o])
# Cell
class Category(str, ShowTitle): _show_args = {'label': 'category'}
# Cell
class MultiCategorize(Categorize):
"Reversible transform of multi-category strings to `vocab` id"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab=None, add_na=False): super().__init__(vocab=vocab,add_na=add_na,sort=vocab==None)
def setups(self, dsets):
if not dsets: return
if self.vocab is None:
vals = set()
for b in dsets: vals = vals.union(set(b))
self.vocab = CategoryMap(list(vals), add_na=self.add_na)
def encodes(self, o):
if not all(elem in self.vocab.o2i.keys() for elem in o):
diff = [elem for elem in o if elem not in self.vocab.o2i.keys()]
diff_str = "', '".join(diff)
raise KeyError(f"Labels '{diff_str}' were not included in the training dataset")
return TensorMultiCategory([self.vocab.o2i[o_] for o_ in o])
def decodes(self, o): return MultiCategory ([self.vocab [o_] for o_ in o])
# Cell
class MultiCategory(L):
def show(self, ctx=None, sep=';', color='black', **kwargs):
return show_title(sep.join(self.map(str)), ctx=ctx, color=color, **kwargs)
# Cell
class OneHotEncode(DisplayedTransform):
"One-hot encodes targets"
order=2
def __init__(self, c=None): store_attr()
def setups(self, dsets):
if self.c is None: self.c = len(L(getattr(dsets, 'vocab', None)))
if not self.c: warn("Couldn't infer the number of classes, please pass a value for `c` at init")
def encodes(self, o): return TensorMultiCategory(one_hot(o, self.c).float())
def decodes(self, o): return one_hot_decode(o, None)
# Cell
class EncodedMultiCategorize(Categorize):
"Transform of one-hot encoded multi-category that decodes with `vocab`"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab):
super().__init__(vocab, sort=vocab==None)
self.c = len(vocab)
def encodes(self, o): return TensorMultiCategory(tensor(o).float())
def decodes(self, o): return MultiCategory (one_hot_decode(o, self.vocab))
# Cell
class RegressionSetup(DisplayedTransform):
"Transform that floatifies targets"
loss_func=MSELossFlat()
def __init__(self, c=None): store_attr()
def encodes(self, o): return tensor(o).float()
def decodes(self, o): return TitledFloat(o) if o.ndim==0 else TitledTuple(o_.item() for o_ in o)
def setups(self, dsets):
if self.c is not None: return
try: self.c = len(dsets[0]) if hasattr(dsets[0], '__len__') else 1
except: self.c = 0
# Cell
def get_c(dls):
if getattr(dls, 'c', False): return dls.c
if getattr(getattr(dls.train, 'after_item', None), 'c', False): return dls.train.after_item.c
if getattr(getattr(dls.train, 'after_batch', None), 'c', False): return dls.train.after_batch.c
vocab = getattr(dls, 'vocab', [])
if len(vocab) > 0 and is_listy(vocab[-1]): vocab = vocab[-1]
return len(vocab)
# Cell
class ToTensor(Transform):
"Convert item to appropriate tensor class"
order = 5
# Cell
class IntToFloatTensor(DisplayedTransform):
"Transform image to float tensor, optionally dividing by 255 (e.g. for images)."
order = 10 #Need to run after PIL transforms on the GPU
def __init__(self, div=255., div_mask=1): store_attr()
def encodes(self, o:TensorImage): return o.float().div_(self.div)
def encodes(self, o:TensorMask ): return o.long() // self.div_mask
def decodes(self, o:TensorImage): return ((o.clamp(0., 1.) * self.div).long()) if self.div else o
# Cell
def broadcast_vec(dim, ndim, *t, cuda=True):
"Make a vector broadcastable over `dim` (out of `ndim` total) by prepending and appending unit axes"
v = [1]*ndim
v[dim] = -1
f = to_device if cuda else noop
return [f(tensor(o).view(*v)) for o in t]
# Cell
@docs
class Normalize(DisplayedTransform):
"Normalize/denorm batch of `TensorImage`"
parameters,order = L('mean', 'std'),99
def __init__(self, mean=None, std=None, axes=(0,2,3)): store_attr()
@classmethod
def from_stats(cls, mean, std, dim=1, ndim=4, cuda=True): return cls(*broadcast_vec(dim, ndim, mean, std, cuda=cuda))
def setups(self, dl:DataLoader):
if self.mean is None or self.std is None:
x,*_ = dl.one_batch()
self.mean,self.std = x.mean(self.axes, keepdim=True),x.std(self.axes, keepdim=True)+1e-7
def encodes(self, x:TensorImage): return (x-self.mean) / self.std
def decodes(self, x:TensorImage):
f = to_cpu if x.device.type=='cpu' else noop
return (x*f(self.std) + f(self.mean))
_docs=dict(encodes="Normalize batch", decodes="Denormalize batch")
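# A minimal sketch (the per-channel statistics below are the standard ImageNet values, used
# here only for illustration); `from_stats` reshapes them so they broadcast over an NCHW batch:
#   norm = Normalize.from_stats([0.485,0.456,0.406], [0.229,0.224,0.225], cuda=False)
#   x = TensorImage(torch.rand(4, 3, 8, 8))
#   norm(x)                # (x - mean) / std, per channel
#   norm.decode(norm(x))   # undoes the normalization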
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/transforms.py
|
transforms.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/06_data.block.ipynb (unless otherwise specified).
__all__ = ['TransformBlock', 'CategoryBlock', 'MultiCategoryBlock', 'RegressionBlock', 'DataBlock']
# Cell
from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from .transforms import *
# Cell
class TransformBlock():
"A basic wrapper that links defaults transforms for the data block API"
def __init__(self, type_tfms=None, item_tfms=None, batch_tfms=None, dl_type=None, dls_kwargs=None):
self.type_tfms = L(type_tfms)
self.item_tfms = ToTensor + L(item_tfms)
self.batch_tfms = L(batch_tfms)
self.dl_type,self.dls_kwargs = dl_type,({} if dls_kwargs is None else dls_kwargs)
# Cell
def CategoryBlock(vocab=None, sort=True, add_na=False):
"`TransformBlock` for single-label categorical targets"
return TransformBlock(type_tfms=Categorize(vocab=vocab, sort=sort, add_na=add_na))
# Cell
def MultiCategoryBlock(encoded=False, vocab=None, add_na=False):
"`TransformBlock` for multi-label categorical targets"
tfm = EncodedMultiCategorize(vocab=vocab) if encoded else [MultiCategorize(vocab=vocab, add_na=add_na), OneHotEncode]
return TransformBlock(type_tfms=tfm)
# Cell
def RegressionBlock(n_out=None):
"`TransformBlock` for float targets"
return TransformBlock(type_tfms=RegressionSetup(c=n_out))
# Cell
from inspect import isfunction,ismethod
# Cell
def _merge_grouper(o):
if isinstance(o, LambdaType): return id(o)
elif isinstance(o, type): return o
elif (isfunction(o) or ismethod(o)): return o.__qualname__
return o.__class__
# Cell
def _merge_tfms(*tfms):
"Group the `tfms` in a single list, removing duplicates (from the same class) and instantiating"
g = groupby(concat(*tfms), _merge_grouper)
return L(v[-1] for k,v in g.items()).map(instantiate)
def _zip(x): return L(x).zip()
# Cell
@docs
@funcs_kwargs
class DataBlock():
"Generic container to quickly build `Datasets` and `DataLoaders`"
get_x=get_items=splitter=get_y = None
blocks,dl_type = (TransformBlock,TransformBlock),TfmdDL
_methods = 'get_items splitter get_y get_x'.split()
_msg = "If you wanted to compose several transforms in your getter don't forget to wrap them in a `Pipeline`."
def __init__(self, blocks=None, dl_type=None, getters=None, n_inp=None, item_tfms=None, batch_tfms=None, **kwargs):
blocks = L(self.blocks if blocks is None else blocks)
blocks = L(b() if callable(b) else b for b in blocks)
self.type_tfms = blocks.attrgot('type_tfms', L())
self.default_item_tfms = _merge_tfms(*blocks.attrgot('item_tfms', L()))
self.default_batch_tfms = _merge_tfms(*blocks.attrgot('batch_tfms', L()))
for b in blocks:
if getattr(b, 'dl_type', None) is not None: self.dl_type = b.dl_type
if dl_type is not None: self.dl_type = dl_type
self.dataloaders = delegates(self.dl_type.__init__)(self.dataloaders)
self.dls_kwargs = merge(*blocks.attrgot('dls_kwargs', {}))
self.n_inp = ifnone(n_inp, max(1, len(blocks)-1))
self.getters = ifnone(getters, [noop]*len(self.type_tfms))
if self.get_x:
if len(L(self.get_x)) != self.n_inp:
raise ValueError(f'get_x contains {len(L(self.get_x))} functions, but must contain {self.n_inp} (one for each input)\n{self._msg}')
self.getters[:self.n_inp] = L(self.get_x)
if self.get_y:
n_targs = len(self.getters) - self.n_inp
if len(L(self.get_y)) != n_targs:
raise ValueError(f'get_y contains {len(L(self.get_y))} functions, but must contain {n_targs} (one for each target)\n{self._msg}')
self.getters[self.n_inp:] = L(self.get_y)
if kwargs: raise TypeError(f'invalid keyword arguments: {", ".join(kwargs.keys())}')
self.new(item_tfms, batch_tfms)
def _combine_type_tfms(self): return L([self.getters, self.type_tfms]).map_zip(
lambda g,tt: (g.fs if isinstance(g, Pipeline) else L(g)) + tt)
def new(self, item_tfms=None, batch_tfms=None):
self.item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
self.batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
return self
@classmethod
def from_columns(cls, blocks=None, getters=None, get_items=None, **kwargs):
if getters is None: getters = L(ItemGetter(i) for i in range(2 if blocks is None else len(L(blocks))))
get_items = _zip if get_items is None else compose(get_items, _zip)
return cls(blocks=blocks, getters=getters, get_items=get_items, **kwargs)
def datasets(self, source, verbose=False):
self.source = source ; pv(f"Collecting items from {source}", verbose)
items = (self.get_items or noop)(source) ; pv(f"Found {len(items)} items", verbose)
splits = (self.splitter or RandomSplitter())(items)
pv(f"{len(splits)} datasets of sizes {','.join([str(len(s)) for s in splits])}", verbose)
return Datasets(items, tfms=self._combine_type_tfms(), splits=splits, dl_type=self.dl_type, n_inp=self.n_inp, verbose=verbose)
def dataloaders(self, source, path='.', verbose=False, **kwargs):
dsets = self.datasets(source, verbose=verbose)
kwargs = {**self.dls_kwargs, **kwargs, 'verbose': verbose}
return dsets.dataloaders(path=path, after_item=self.item_tfms, after_batch=self.batch_tfms, **kwargs)
_docs = dict(new="Create a new `DataBlock` with other `item_tfms` and `batch_tfms`",
datasets="Create a `Datasets` object from `source`",
dataloaders="Create a `DataLoaders` object from `source`")
# Cell
def _short_repr(x):
if isinstance(x, tuple): return f'({", ".join([_short_repr(y) for y in x])})'
if isinstance(x, list): return f'[{", ".join([_short_repr(y) for y in x])}]'
if not isinstance(x, Tensor): return str(x)
if x.numel() <= 20 and x.ndim <=1: return str(x)
return f'{x.__class__.__name__} of size {"x".join([str(d) for d in x.shape])}'
# Cell
def _apply_pipeline(p, x):
print(f" {p}\n starting from\n {_short_repr(x)}")
for f in p.fs:
name = f.name
try:
x = f(x)
if name != "noop": print(f" applying {name} gives\n {_short_repr(x)}")
except Exception as e:
print(f" applying {name} failed.")
raise e
return x
# Cell
from .load import _collate_types
def _find_fail_collate(s):
s = L(*s)
for x in s[0]:
if not isinstance(x, _collate_types): return f"{type(x).__name__} is not collatable"
for i in range_of(s[0]):
try: _ = default_collate(s.itemgot(i))
except:
shapes = [getattr(o[i], 'shape', None) for o in s]
return f"Could not collate the {i}-th members of your tuples because got the following shapes\n{','.join([str(s) for s in shapes])}"
# Cell
@patch
def summary(self: DataBlock, source, bs=4, show_batch=False, **kwargs):
"Steps through the transform pipeline for one batch, and optionally calls `show_batch(**kwargs)` on the transient `Dataloaders`."
print(f"Setting-up type transforms pipelines")
dsets = self.datasets(source, verbose=True)
print("\nBuilding one sample")
for tl in dsets.train.tls:
_apply_pipeline(tl.tfms, get_first(dsets.train.items))
print(f"\nFinal sample: {dsets.train[0]}\n\n")
dls = self.dataloaders(source, bs=bs, verbose=True)
print("\nBuilding one batch")
if len([f for f in dls.train.after_item.fs if f.name != 'noop'])!=0:
print("Applying item_tfms to the first sample:")
s = [_apply_pipeline(dls.train.after_item, dsets.train[0])]
print(f"\nAdding the next {bs-1} samples")
s += [dls.train.after_item(dsets.train[i]) for i in range(1, bs)]
else:
print("No item_tfms to apply")
s = [dls.train.after_item(dsets.train[i]) for i in range(bs)]
if len([f for f in dls.train.before_batch.fs if f.name != 'noop'])!=0:
print("\nApplying before_batch to the list of samples")
s = _apply_pipeline(dls.train.before_batch, s)
else: print("\nNo before_batch transform to apply")
print("\nCollating items in a batch")
try:
b = dls.train.create_batch(s)
b = retain_types(b, s[0] if is_listy(s) else s)
except Exception as e:
print("Error! It's not possible to collate your items in a batch")
why = _find_fail_collate(s)
print("Make sure all parts of your samples are tensors of the same size" if why is None else why)
raise e
if len([f for f in dls.train.after_batch.fs if f.name != 'noop'])!=0:
print("\nApplying batch_tfms to the batch built")
b = to_device(b, dls.device)
b = _apply_pipeline(dls.train.after_batch, b)
else: print("\nNo batch_tfms to apply")
if show_batch: dls.show_batch(**kwargs)
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/block.py
|
block.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_data.load.ipynb (unless otherwise specified).
__all__ = ['fa_collate', 'fa_convert', 'SkipItemException', 'DataLoader']
# Cell
from ..torch_basics import *
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter,_DatasetKind
_loaders = (_MultiProcessingDataLoaderIter,_SingleProcessDataLoaderIter)
# Cell
def _wif(worker_id):
set_num_threads(1)
info = get_worker_info()
ds = info.dataset.d
ds.num_workers,ds.offs = info.num_workers,info.id
set_seed(info.seed)
ds.wif()
class _FakeLoader:
_IterableDataset_len_called,_auto_collation,collate_fn,drop_last = None,False,noops,False
_index_sampler,generator,prefetch_factor = Inf.count,None,2
dataset_kind = _dataset_kind = _DatasetKind.Iterable
def __init__(self, d, pin_memory, num_workers, timeout, persistent_workers):
self.dataset,self.default,self.worker_init_fn = self,d,_wif
store_attr('d,pin_memory,num_workers,timeout,persistent_workers')
def __iter__(self): return iter(self.d.create_batches(self.d.sample()))
@property
def multiprocessing_context(self): return (None,multiprocessing)[self.num_workers>0]
@contextmanager
def no_multiproc(self):
old_num_workers = self.num_workers
try:
self.num_workers = 0
yield self.d
finally: self.num_workers = old_num_workers
_collate_types = (ndarray, Tensor, typing.Mapping, str)
# Cell
def fa_collate(t):
"A replacement for PyTorch `default_collate` which maintains types and handles `Sequence`s"
b = t[0]
return (default_collate(t) if isinstance(b, _collate_types)
else type(t[0])([fa_collate(s) for s in zip(*t)]) if isinstance(b, Sequence)
else default_collate(t))
# Cell
def fa_convert(t):
"A replacement for PyTorch `default_convert` which maintains types and handles `Sequence`s"
return (default_convert(t) if isinstance(t, _collate_types)
else type(t)([fa_convert(s) for s in t]) if isinstance(t, Sequence)
else default_convert(t))
# Cell
class SkipItemException(Exception):
"Raised to notify `DataLoader` to skip an item"
pass
# Cell
@funcs_kwargs
class DataLoader(GetAttr):
_noop_methods = 'wif before_iter after_item before_batch after_batch after_iter'.split()
for o in _noop_methods: exec(f"def {o}(self, x=None, *args, **kwargs): return x")
_methods = _noop_methods + 'create_batches create_item create_batch retain \
get_idxs sample shuffle_fn do_batch create_batch'.split()
_default = 'dataset'
def __init__(self, dataset=None, bs=None, num_workers=0, pin_memory=False, timeout=0, batch_size=None,
shuffle=False, drop_last=False, indexed=None, n=None, device=None, persistent_workers=False, **kwargs):
if batch_size is not None: bs = batch_size # PyTorch compatibility
assert not (bs is None and drop_last)
if indexed is None: indexed = dataset is not None and hasattr(dataset,'__getitem__')
if n is None:
try: n = len(dataset)
except TypeError: pass
store_attr('dataset,bs,shuffle,drop_last,indexed,n,pin_memory,timeout,device')
self.rng,self.num_workers,self.offs = random.Random(random.randint(0,2**32-1)),1,0
self.fake_l = _FakeLoader(self, pin_memory, num_workers, timeout, persistent_workers=persistent_workers)
def __len__(self):
if self.n is None: raise TypeError
if self.bs is None: return self.n
return self.n//self.bs + (0 if self.drop_last or self.n%self.bs==0 else 1)
def get_idxs(self):
idxs = Inf.count if self.indexed else Inf.nones
if self.n is not None: idxs = list(itertools.islice(idxs, self.n))
if self.shuffle: idxs = self.shuffle_fn(idxs)
return idxs
def sample(self):
return (b for i,b in enumerate(self.__idxs) if i//(self.bs or 1)%self.num_workers==self.offs)
def __iter__(self):
self.randomize()
self.before_iter()
self.__idxs=self.get_idxs() # called in context of main process (not workers/subprocesses)
for b in _loaders[self.fake_l.num_workers==0](self.fake_l):
if self.device is not None: b = to_device(b, self.device)
yield self.after_batch(b)
self.after_iter()
if hasattr(self, 'it'): del(self.it)
def create_batches(self, samps):
self.it = iter(self.dataset) if self.dataset is not None else None
res = filter(lambda o:o is not None, map(self.do_item, samps))
yield from map(self.do_batch, self.chunkify(res))
def new(self, dataset=None, cls=None, **kwargs):
if dataset is None: dataset = self.dataset
if cls is None: cls = type(self)
cur_kwargs = dict(dataset=dataset, num_workers=self.fake_l.num_workers, pin_memory=self.pin_memory, timeout=self.timeout,
bs=self.bs, shuffle=self.shuffle, drop_last=self.drop_last, indexed=self.indexed, device=self.device)
for n in self._methods:
o = getattr(self, n)
if not isinstance(o, MethodType): cur_kwargs[n] = o
return cls(**merge(cur_kwargs, kwargs))
@property
def prebatched(self): return self.bs is None
def do_item(self, s):
try: return self.after_item(self.create_item(s))
except SkipItemException: return None
def chunkify(self, b): return b if self.prebatched else chunked(b, self.bs, self.drop_last)
def shuffle_fn(self, idxs): return self.rng.sample(idxs, len(idxs))
def randomize(self): self.rng = random.Random(self.rng.randint(0,2**32-1))
def retain(self, res, b): return retain_types(res, b[0] if is_listy(b) else b)
def create_item(self, s): return next(self.it) if s is None else self.dataset[s]
def create_batch(self, b): return (fa_collate,fa_convert)[self.prebatched](b)
def do_batch(self, b): return self.retain(self.create_batch(self.before_batch(b)), b)
def to(self, device): self.device = device
def one_batch(self):
if self.n is not None and len(self)==0: raise ValueError(f'This DataLoader does not contain any batches')
with self.fake_l.no_multiproc(): res = first(self)
if hasattr(self, 'it'): delattr(self, 'it')
return res
# Cell
add_docs(DataLoader, "API compatible with PyTorch DataLoader, with a lot more callbacks and flexibility",
get_idxs = "Return a list of indices to reference the dataset. Calls `shuffle_fn` internally if `shuffle=True`.",
sample = "Same as `get_idxs` but returns a generator of indices to reference the dataset.",
create_batches = "Takes output of `sample` as input, and returns batches of data. Does not apply `after_batch`.",
new = "Create a new `DataLoader` with given arguments keeping remaining arguments same as original `DataLoader`.",
prebatched = "Check if `bs` is None.",
do_item = "Combines `after_item` and `create_item` to get an item from dataset by providing index as input.",
chunkify = "Used by `create_batches` to turn generator of items (`b`) into batches.",
shuffle_fn = "Returns a random permutation of `idxs`.",
randomize = "Set's `DataLoader` random number generator state.",
retain = "Cast each item of `res` to type of matching item in `b` if its a superclass.",
create_item = "Subset of the dataset containing the index values of sample if exists, else next iterator.",
create_batch = "Collate a list of items into a batch.",
do_batch = "Combines `create_batch` and `before_batch` to get a batch of items. Input is a list of items to collate.",
to = "Sets `self.device=device`.",
one_batch = "Return one batch from `DataLoader`.",
wif = "See pytorch `worker_init_fn` for details.",
before_iter = "Called before `DataLoader` starts to read/iterate over the dataset.",
after_item = "Takes output of `create_item` as input and applies this function on it.",
before_batch = "It is called before collating a list of items into a batch. Input is a list of items.",
after_batch = "After collating mini-batch of items, the mini-batch is passed through this function.",
after_iter = "Called after `DataLoader` has fully read/iterated over the dataset.")
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/load.py
|
load.py
|
from .all import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/data/__init__.py
|
__init__.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/41_tabular.data.ipynb (unless otherwise specified).
__all__ = ['TabularDataLoaders']
# Cell
from ..torch_basics import *
from ..data.all import *
from .core import *
# Cell
class TabularDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for tabular data"
@classmethod
@delegates(Tabular.dataloaders, but=["dl_type", "dl_kwargs"])
def from_df(cls, df, path='.', procs=None, cat_names=None, cont_names=None, y_names=None, y_block=None,
valid_idx=None, **kwargs):
"Create from `df` in `path` using `procs`"
if cat_names is None: cat_names = []
if cont_names is None: cont_names = list(set(df)-set(L(cat_names))-set(L(y_names)))
splits = RandomSplitter()(df) if valid_idx is None else IndexSplitter(valid_idx)(df)
to = TabularPandas(df, procs, cat_names, cont_names, y_names, splits=splits, y_block=y_block)
return to.dataloaders(path=path, **kwargs)
@classmethod
def from_csv(cls, csv, skipinitialspace=True, **kwargs):
"Create from `csv` file in `path` using `procs`"
return cls.from_df(pd.read_csv(csv, skipinitialspace=skipinitialspace), **kwargs)
@delegates(TabDataLoader.__init__)
def test_dl(self, test_items, rm_type_tfms=None, process=True, **kwargs):
to = self.train_ds.new(test_items)
if process: to.process()
return self.valid.new(to, **kwargs)
Tabular._dbunch_type = TabularDataLoaders
TabularDataLoaders.from_csv = delegates(to=TabularDataLoaders.from_df)(TabularDataLoaders.from_csv)
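# A minimal sketch (the column names below are hypothetical, in the style of the adult sample):
#   dls = TabularDataLoaders.from_df(df, path='.',
#             procs=[Categorify, FillMissing, Normalize],
#             cat_names=['workclass','education'], cont_names=['age','fnlwgt'],
#             y_names='salary', valid_idx=list(range(800,1000)), bs=64)
#   dls.show_batch()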
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/tabular/data.py
|
data.py
|
from ..basics import *
from ..callback.all import *
from .core import *
from .data import *
from .model import *
from .learner import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/tabular/all.py
|
all.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/43_tabular.learner.ipynb (unless otherwise specified).
__all__ = ['TabularLearner', 'tabular_learner']
# Cell
from ..basics import *
from .core import *
from .model import *
# Cell
class TabularLearner(Learner):
"`Learner` for tabular data"
def predict(self, row):
"Predict on a Pandas Series"
dl = self.dls.test_dl(row.to_frame().T)
dl.dataset.conts = dl.dataset.conts.astype(np.float32)
inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
b = (*tuplify(inp),*tuplify(dec_preds))
full_dec = self.dls.decode(b)
return full_dec,dec_preds[0],preds[0]
# Cell
@delegates(Learner.__init__)
def tabular_learner(dls, layers=None, emb_szs=None, config=None, n_out=None, y_range=None, **kwargs):
"Get a `Learner` using `dls`, with `metrics`, including a `TabularModel` created using the remaining params."
if config is None: config = tabular_config()
if layers is None: layers = [200,100]
to = dls.train_ds
emb_szs = get_emb_sz(dls.train_ds, {} if emb_szs is None else emb_szs)
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
if y_range is None and 'y_range' in config: y_range = config.pop('y_range')
model = TabularModel(emb_szs, len(dls.cont_names), n_out, layers, y_range=y_range, **config)
return TabularLearner(dls, model, **kwargs)
# Cell
@typedispatch
def show_results(x:Tabular, y:Tabular, samples, outs, ctxs=None, max_n=10, **kwargs):
df = x.all_cols[:max_n]
for n in x.y_names: df[n+'_pred'] = y[n][:max_n].values
display_df(df)
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/tabular/learner.py
|
learner.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/40_tabular.core.ipynb (unless otherwise specified).
__all__ = ['make_date', 'add_datepart', 'add_elapsed_times', 'cont_cat_split', 'df_shrink_dtypes', 'df_shrink',
'Tabular', 'TabularPandas', 'TabularProc', 'Categorify', 'FillStrategy', 'FillMissing', 'ReadTabBatch',
'TabDataLoader']
# Cell
from ..torch_basics import *
from ..data.all import *
# Cell
pd.set_option('mode.chained_assignment','raise')
# Cell
def make_date(df, date_field):
"Make sure `df[date_field]` is of the right date type."
field_dtype = df[date_field].dtype
if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
field_dtype = np.datetime64
if not np.issubdtype(field_dtype, np.datetime64):
df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True)
# Cell
def add_datepart(df, field_name, prefix=None, drop=True, time=False):
"Helper function that adds columns relevant to a date in the column `field_name` of `df`."
make_date(df, field_name)
field = df[field_name]
prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name))
attr = ['Year', 'Month', 'Day', 'Dayofweek', 'Dayofyear', 'Is_month_end', 'Is_month_start',
'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[prefix + n] = getattr(field.dt, n.lower())
# Pandas removed `dt.week` in v1.1.10
week = field.dt.isocalendar().week if hasattr(field.dt, 'isocalendar') else field.dt.week
df.insert(3, prefix+'Week', week)
mask = ~field.isna()
df[prefix + 'Elapsed'] = np.where(mask,field.values.astype(np.int64) // 10 ** 9,None)
if drop: df.drop(field_name, axis=1, inplace=True)
return df
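# A quick sketch (assuming `df` has a 'saledate' column): the prefix becomes 'sale' (any
# trailing 'date' is stripped), so the new columns are 'saleYear', 'saleMonth', ...,
# 'saleWeek' and 'saleElapsed' (seconds since the epoch); the original column is dropped:
#   df = add_datepart(df, 'saledate')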
# Cell
def _get_elapsed(df,field_names, date_field, base_field, prefix):
for f in field_names:
day1 = np.timedelta64(1, 'D')
last_date,last_base,res = np.datetime64(),None,[]
for b,v,d in zip(df[base_field].values, df[f].values, df[date_field].values):
if last_base is None or b != last_base:
last_date,last_base = np.datetime64(),b
if v: last_date = d
res.append(((d-last_date).astype('timedelta64[D]') / day1))
df[prefix + f] = res
return df
# Cell
def add_elapsed_times(df, field_names, date_field, base_field):
"Add in `df` for each event in `field_names` the elapsed time according to `date_field` grouped by `base_field`"
field_names = list(L(field_names))
#Make sure date_field is a date and base_field a bool
df[field_names] = df[field_names].astype('bool')
make_date(df, date_field)
work_df = df[field_names + [date_field, base_field]]
work_df = work_df.sort_values([base_field, date_field])
work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'After')
work_df = work_df.sort_values([base_field, date_field], ascending=[True, False])
work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'Before')
for a in ['After' + f for f in field_names] + ['Before' + f for f in field_names]:
work_df[a] = work_df[a].fillna(0).astype(int)
for a,s in zip([True, False], ['_bw', '_fw']):
work_df = work_df.set_index(date_field)
tmp = (work_df[[base_field] + field_names].sort_index(ascending=a)
.groupby(base_field).rolling(7, min_periods=1).sum())
tmp.drop(base_field,1,inplace=True)
tmp.reset_index(inplace=True)
work_df.reset_index(inplace=True)
work_df = work_df.merge(tmp, 'left', [date_field, base_field], suffixes=['', s])
work_df.drop(field_names,1,inplace=True)
return df.merge(work_df, 'left', [date_field, base_field])
# Cell
def cont_cat_split(df, max_card=20, dep_var=None):
"Helper function that returns column names of cont and cat variables from given `df`."
cont_names, cat_names = [], []
for label in df:
if label in L(dep_var): continue
if (np.issubdtype(df[label].dtype, np.integer) and
df[label].unique().shape[0] > max_card or
np.issubdtype(df[label].dtype, np.floating)):
cont_names.append(label)
else: cat_names.append(label)
return cont_names, cat_names
# Cell
def df_shrink_dtypes(df, skip=[], obj2cat=True, int2uint=False):
"Return any possible smaller data types for DataFrame columns. Allows `object`->`category`, `int`->`uint`, and exclusion."
# 1: Build column filter and typemap
excl_types, skip = {'category','datetime64[ns]','bool'}, set(skip)
typemap = {'int' : [(np.dtype(x), np.iinfo(x).min, np.iinfo(x).max) for x in (np.int8, np.int16, np.int32, np.int64)],
'uint' : [(np.dtype(x), np.iinfo(x).min, np.iinfo(x).max) for x in (np.uint8, np.uint16, np.uint32, np.uint64)],
'float' : [(np.dtype(x), np.finfo(x).min, np.finfo(x).max) for x in (np.float32, np.float64, np.longdouble)]
}
if obj2cat: typemap['object'] = 'category' # User wants to categorify dtype('Object'), which may not always save space
else: excl_types.add('object')
new_dtypes = {}
exclude = lambda dt: dt[1].name not in excl_types and dt[0] not in skip
for c, old_t in filter(exclude, df.dtypes.items()):
t = next((v for k,v in typemap.items() if old_t.name.startswith(k)), None)
if isinstance(t, list): # Find the smallest type that fits
if int2uint and t==typemap['int'] and df[c].min() >= 0: t=typemap['uint']
new_t = next((r[0] for r in t if r[1]<=df[c].min() and r[2]>=df[c].max()), None)
if new_t and new_t == old_t: new_t = None
else: new_t = t if isinstance(t, str) else None
if new_t: new_dtypes[c] = new_t
return new_dtypes
# Cell
def df_shrink(df, skip=[], obj2cat=True, int2uint=False):
"Reduce DataFrame memory usage, by casting to smaller types returned by `df_shrink_dtypes()`."
dt = df_shrink_dtypes(df, skip, obj2cat=obj2cat, int2uint=int2uint)
return df.astype(dt)
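# A quick sketch: numeric columns are downcast to the smallest dtype that holds their observed
# range, and (by default) object columns become 'category':
#   df2 = df_shrink(df, int2uint=True)   # e.g. an int64 column of small non-negative values -> uint8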
# Cell
class _TabIloc:
"Get/set rows by iloc and cols by name"
def __init__(self,to): self.to = to
def __getitem__(self, idxs):
df = self.to.items
if isinstance(idxs,tuple):
rows,cols = idxs
cols = df.columns.isin(cols) if is_listy(cols) else df.columns.get_loc(cols)
else: rows,cols = idxs,slice(None)
return self.to.new(df.iloc[rows, cols])
# Cell
class Tabular(CollBase, GetAttr, FilteredBase):
"A `DataFrame` wrapper that knows which cols are cont/cat/y, and returns rows in `__getitem__`"
_default,with_cont='procs',True
def __init__(self, df, procs=None, cat_names=None, cont_names=None, y_names=None, y_block=None, splits=None,
do_setup=True, device=None, inplace=False, reduce_memory=True):
if inplace and splits is not None and pd.options.mode.chained_assignment is not None:
warn("Using inplace with splits will trigger a pandas error. Set `pd.options.mode.chained_assignment=None` to avoid it.")
if not inplace: df = df.copy()
if reduce_memory: df = df_shrink(df)
if splits is not None: df = df.iloc[sum(splits, [])]
self.dataloaders = delegates(self._dl_type.__init__)(self.dataloaders)
super().__init__(df)
self.y_names,self.device = L(y_names),device
if y_block is None and self.y_names:
# Make ys categorical if they're not numeric
ys = df[self.y_names]
if len(ys.select_dtypes(include='number').columns)!=len(ys.columns): y_block = CategoryBlock()
else: y_block = RegressionBlock()
if y_block is not None and do_setup:
if callable(y_block): y_block = y_block()
procs = L(procs) + y_block.type_tfms
self.cat_names,self.cont_names,self.procs = L(cat_names),L(cont_names),Pipeline(procs)
self.split = len(df) if splits is None else len(splits[0])
if do_setup: self.setup()
def new(self, df):
return type(self)(df, do_setup=False, reduce_memory=False, y_block=TransformBlock(),
**attrdict(self, 'procs','cat_names','cont_names','y_names', 'device'))
def subset(self, i): return self.new(self.items[slice(0,self.split) if i==0 else slice(self.split,len(self))])
def copy(self): self.items = self.items.copy(); return self
def decode(self): return self.procs.decode(self)
def decode_row(self, row): return self.new(pd.DataFrame(row).T).decode().items.iloc[0]
def show(self, max_n=10, **kwargs): display_df(self.new(self.all_cols[:max_n]).decode().items)
def setup(self): self.procs.setup(self)
def process(self): self.procs(self)
def loc(self): return self.items.loc
def iloc(self): return _TabIloc(self)
def targ(self): return self.items[self.y_names]
def x_names (self): return self.cat_names + self.cont_names
def n_subsets(self): return 2
def y(self): return self[self.y_names[0]]
def new_empty(self): return self.new(pd.DataFrame({}, columns=self.items.columns))
def to_device(self, d=None):
self.device = d
return self
def all_col_names (self):
ys = [n for n in self.y_names if n in self.items.columns]
return self.x_names + self.y_names if len(ys) == len(self.y_names) else self.x_names
properties(Tabular,'loc','iloc','targ','all_col_names','n_subsets','x_names','y')
# Cell
class TabularPandas(Tabular):
"A `Tabular` object with transforms"
def transform(self, cols, f, all_col=True):
if not all_col: cols = [c for c in cols if c in self.items.columns]
if len(cols) > 0: self[cols] = self[cols].transform(f)
# Cell
def _add_prop(cls, nm):
@property
def f(o): return o[list(getattr(o,nm+'_names'))]
@f.setter
def fset(o, v): o[getattr(o,nm+'_names')] = v
setattr(cls, nm+'s', f)
setattr(cls, nm+'s', fset)
_add_prop(Tabular, 'cat')
_add_prop(Tabular, 'cont')
_add_prop(Tabular, 'y')
_add_prop(Tabular, 'x')
_add_prop(Tabular, 'all_col')
# Cell
class TabularProc(InplaceTransform):
"Base class to write a non-lazy tabular processor for dataframes"
def setup(self, items=None, train_setup=False): #TODO: properly deal with train_setup
super().setup(getattr(items,'train',items), train_setup=False)
# Procs are called as soon as data is available
return self(items.items if isinstance(items,Datasets) else items)
@property
def name(self): return f"{super().name} -- {getattr(self,'__stored_args__',{})}"
# Cell
def _apply_cats (voc, add, c):
if not is_categorical_dtype(c):
return pd.Categorical(c, categories=voc[c.name][add:]).codes+add
return c.cat.codes+add #if is_categorical_dtype(c) else c.map(voc[c.name].o2i)
def _decode_cats(voc, c): return c.map(dict(enumerate(voc[c.name].items)))
# Cell
class Categorify(TabularProc):
"Transform the categorical variables to something similar to `pd.Categorical`"
order = 1
def setups(self, to):
store_attr(classes={n:CategoryMap(to.iloc[:,n].items, add_na=(n in to.cat_names)) for n in to.cat_names}, but='to')
def encodes(self, to): to.transform(to.cat_names, partial(_apply_cats, self.classes, 1))
def decodes(self, to): to.transform(to.cat_names, partial(_decode_cats, self.classes))
def __getitem__(self,k): return self.classes[k]
# Internal Cell
@Categorize
def setups(self, to:Tabular):
if len(to.y_names) > 0:
if self.vocab is None:
self.vocab = CategoryMap(getattr(to, 'train', to).iloc[:,to.y_names[0]].items, strict=True)
else:
self.vocab = CategoryMap(self.vocab, sort=False, add_na=self.add_na)
self.c = len(self.vocab)
return self(to)
@Categorize
def encodes(self, to:Tabular):
to.transform(to.y_names, partial(_apply_cats, {n: self.vocab for n in to.y_names}, 0), all_col=False)
return to
@Categorize
def decodes(self, to:Tabular):
to.transform(to.y_names, partial(_decode_cats, {n: self.vocab for n in to.y_names}), all_col=False)
return to
# Internal Cell
@Normalize
def setups(self, to:Tabular):
store_attr(but='to', means=dict(getattr(to, 'train', to).conts.mean()),
stds=dict(getattr(to, 'train', to).conts.std(ddof=0)+1e-7))
return self(to)
@Normalize
def encodes(self, to:Tabular):
to.conts = (to.conts-self.means) / self.stds
return to
@Normalize
def decodes(self, to:Tabular):
to.conts = (to.conts*self.stds ) + self.means
return to
# Cell
class FillStrategy:
"Namespace containing the various filling strategies."
def median (c,fill): return c.median()
def constant(c,fill): return fill
def mode (c,fill): return c.dropna().value_counts().idxmax()
# Cell
class FillMissing(TabularProc):
"Fill the missing values in continuous columns."
def __init__(self, fill_strategy=FillStrategy.median, add_col=True, fill_vals=None):
if fill_vals is None: fill_vals = defaultdict(int)
store_attr()
def setups(self, dsets):
missing = pd.isnull(dsets.conts).any()
store_attr(but='to', na_dict={n:self.fill_strategy(dsets[n], self.fill_vals[n])
for n in missing[missing].keys()})
self.fill_strategy = self.fill_strategy.__name__
def encodes(self, to):
missing = pd.isnull(to.conts)
for n in missing.any()[missing.any()].keys():
assert n in self.na_dict, f"nan values in `{n}` but not in setup training set"
for n in self.na_dict.keys():
to[n].fillna(self.na_dict[n], inplace=True)
if self.add_col:
to.loc[:,n+'_na'] = missing[n]
if n+'_na' not in to.cat_names: to.cat_names.append(n+'_na')
# Cell
def _maybe_expand(o): return o[:,None] if o.ndim==1 else o
# Cell
class ReadTabBatch(ItemTransform):
"Transform `TabularPandas` values into a `Tensor` with the ability to decode"
def __init__(self, to): self.to = to.new_empty()
def encodes(self, to):
if not to.with_cont: res = (tensor(to.cats).long(),)
else: res = (tensor(to.cats).long(),tensor(to.conts).float())
ys = [n for n in to.y_names if n in to.items.columns]
if len(ys) == len(to.y_names): res = res + (tensor(to.targ),)
if to.device is not None: res = to_device(res, to.device)
return res
def decodes(self, o):
o = [_maybe_expand(o_) for o_ in to_np(o) if o_.size != 0]
vals = np.concatenate(o, axis=1)
try: df = pd.DataFrame(vals, columns=self.to.all_col_names)
except: df = pd.DataFrame(vals, columns=self.to.x_names)
to = self.to.new(df)
return to
# Cell
@typedispatch
def show_batch(x: Tabular, y, its, max_n=10, ctxs=None):
x.show()
# Cell
@delegates()
class TabDataLoader(TfmdDL):
"A transformed `DataLoader` for Tabular data"
do_item = noops
def __init__(self, dataset, bs=16, shuffle=False, after_batch=None, num_workers=0, **kwargs):
if after_batch is None: after_batch = L(TransformBlock().batch_tfms)+ReadTabBatch(dataset)
super().__init__(dataset, bs=bs, shuffle=shuffle, after_batch=after_batch, num_workers=num_workers, **kwargs)
def create_batch(self, b): return self.dataset.iloc[b]
TabularPandas._dl_type = TabDataLoader
# Internal Cell
@EncodedMultiCategorize
def setups(self, to:Tabular):
self.c = len(self.vocab)
return self(to)
@EncodedMultiCategorize
def encodes(self, to:Tabular): return to
@EncodedMultiCategorize
def decodes(self, to:Tabular):
to.transform(to.y_names, lambda c: c==1)
return to
# Internal Cell
@RegressionSetup
def setups(self, to:Tabular):
if self.c is not None: return
self.c = len(to.y_names)
return to
@RegressionSetup
def encodes(self, to:Tabular): return to
@RegressionSetup
def decodes(self, to:Tabular): return to
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/tabular/core.py
|
core.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/42_tabular.model.ipynb (unless otherwise specified).
__all__ = ['emb_sz_rule', 'get_emb_sz', 'TabularModel', 'tabular_config']
# Cell
from ..torch_basics import *
from .core import *
# Cell
def emb_sz_rule(n_cat):
"Rule of thumb to pick embedding size corresponding to `n_cat`"
return min(600, round(1.6 * n_cat**0.56))
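# Worked examples of the rule min(600, round(1.6 * n_cat**0.56)):
#   emb_sz_rule(10)      # -> 6
#   emb_sz_rule(1_000)   # -> 77
#   emb_sz_rule(100_000) # -> 600 (capped)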
# Cell
def _one_emb_sz(classes, n, sz_dict=None):
"Pick an embedding size for `n` depending on `classes` if not given in `sz_dict`."
sz_dict = ifnone(sz_dict, {})
n_cat = len(classes[n])
sz = sz_dict.get(n, int(emb_sz_rule(n_cat))) # rule of thumb
return n_cat,sz
# Cell
def get_emb_sz(to, sz_dict=None):
"Get default embedding size from `TabularPreprocessor` `proc` or the ones in `sz_dict`"
return [_one_emb_sz(to.classes, n, sz_dict) for n in to.cat_names]
# Cell
class TabularModel(Module):
"Basic model for tabular data."
def __init__(self, emb_szs, n_cont, out_sz, layers, ps=None, embed_p=0.,
y_range=None, use_bn=True, bn_final=False, bn_cont=True, act_cls=nn.ReLU(inplace=True)):
ps = ifnone(ps, [0]*len(layers))
if not is_listy(ps): ps = [ps]*len(layers)
self.embeds = nn.ModuleList([Embedding(ni, nf) for ni,nf in emb_szs])
self.emb_drop = nn.Dropout(embed_p)
self.bn_cont = nn.BatchNorm1d(n_cont) if bn_cont else None
n_emb = sum(e.embedding_dim for e in self.embeds)
self.n_emb,self.n_cont = n_emb,n_cont
sizes = [n_emb + n_cont] + layers + [out_sz]
actns = [act_cls for _ in range(len(sizes)-2)] + [None]
_layers = [LinBnDrop(sizes[i], sizes[i+1], bn=use_bn and (i!=len(actns)-1 or bn_final), p=p, act=a)
for i,(p,a) in enumerate(zip(ps+[0.],actns))]
if y_range is not None: _layers.append(SigmoidRange(*y_range))
self.layers = nn.Sequential(*_layers)
def forward(self, x_cat, x_cont=None):
if self.n_emb != 0:
x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
x = torch.cat(x, 1)
x = self.emb_drop(x)
if self.n_cont != 0:
if self.bn_cont is not None: x_cont = self.bn_cont(x_cont)
x = torch.cat([x, x_cont], 1) if self.n_emb != 0 else x_cont
return self.layers(x)
# Cell
@delegates(TabularModel.__init__)
def tabular_config(**kwargs):
"Convenience function to easily create a config for `TabularModel`"
return kwargs
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/tabular/model.py
|
model.py
|
from .all import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/tabular/__init__.py
|
__init__.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/31_text.data.ipynb (unless otherwise specified).
__all__ = ['reverse_text', 'make_vocab', 'TensorText', 'LMTensorText', 'Numericalize', 'LMDataLoader', 'Pad_Input',
'pad_input', 'pad_chunk', 'pad_input_chunk', 'Pad_Chunk', 'SortedDL', 'TextBlock', 'TextDataLoaders']
# Cell
from ..torch_basics import *
from ..data.all import *
from .core import *
# Cell
def reverse_text(x): return x.flip(0)
# Cell
def make_vocab(count, min_freq=3, max_vocab=60000, special_toks=None):
"Create a vocab of `max_vocab` size from `Counter` `count` with items present more than `min_freq`"
vocab = [o for o,c in count.most_common(max_vocab) if c >= min_freq]
special_toks = ifnone(special_toks, defaults.text_spec_tok)
for o in reversed(special_toks): #Make sure all special tokens are in the vocab
if o in vocab: vocab.remove(o)
vocab.insert(0, o)
vocab = vocab[:max_vocab]
return vocab + [f'xxfake' for i in range(0, 8-len(vocab)%8)]
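# Note: the vocab is padded with 'xxfake' tokens up to a multiple of 8 so the embedding and
# softmax matrices have sizes that are friendly to mixed-precision (fp16) kernels.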
# Cell
class TensorText(TensorBase): pass
class LMTensorText(TensorText): pass
TensorText.__doc__ = "Semantic type for a tensor representing text"
LMTensorText.__doc__ = "Semantic type for a tensor representing text in language modeling"
# Cell
class Numericalize(Transform):
"Reversible transform of tokenized texts to numericalized ids"
def __init__(self, vocab=None, min_freq=3, max_vocab=60000, special_toks=None):
store_attr('vocab,min_freq,max_vocab,special_toks')
self.o2i = None if vocab is None else defaultdict(int, {v:k for k,v in enumerate(vocab)})
def setups(self, dsets):
if dsets is None: return
if self.vocab is None:
count = dsets.counter if getattr(dsets, 'counter', None) is not None else Counter(p for o in dsets for p in o)
if self.special_toks is None and hasattr(dsets, 'special_toks'):
self.special_toks = dsets.special_toks
self.vocab = make_vocab(count, min_freq=self.min_freq, max_vocab=self.max_vocab, special_toks=self.special_toks)
self.o2i = defaultdict(int, {v:k for k,v in enumerate(self.vocab) if v != 'xxfake'})
def encodes(self, o): return TensorText(tensor([self.o2i [o_] for o_ in o]))
def decodes(self, o): return L(self.vocab[o_] for o_ in o)
# Cell
def _maybe_first(o): return o[0] if isinstance(o, tuple) else o
# Cell
def _get_tokenizer(ds):
tok = getattr(ds, 'tokenizer', None)
if isinstance(tok, Tokenizer): return tok
if isinstance(tok, (list,L)):
for t in tok:
if isinstance(t, Tokenizer): return t
# Cell
def _get_lengths(ds):
tok = _get_tokenizer(ds)
if tok is None: return
return tok.get_lengths(ds.items)
# Cell
#TODO: add backward
@delegates()
class LMDataLoader(TfmdDL):
"A `DataLoader` suitable for language modeling"
def __init__(self, dataset, lens=None, cache=2, bs=64, seq_len=72, num_workers=0, **kwargs):
self.items = ReindexCollection(dataset, cache=cache, tfm=_maybe_first)
self.seq_len = seq_len
if lens is None: lens = _get_lengths(dataset)
if lens is None: lens = [len(o) for o in self.items]
self.lens = ReindexCollection(lens, idxs=self.items.idxs)
# The "-1" is to allow for final label, we throw away the end that's less than bs
corpus = round_multiple(sum(lens)-1, bs, round_down=True)
self.bl = corpus//bs #bl stands for batch length
self.n_batches = self.bl//(seq_len) + int(self.bl%seq_len!=0)
self.last_len = self.bl - (self.n_batches-1)*seq_len
self.make_chunks()
super().__init__(dataset=dataset, bs=bs, num_workers=num_workers, **kwargs)
self.n = self.n_batches*bs
def make_chunks(self): self.chunks = Chunks(self.items, self.lens)
def shuffle_fn(self,idxs):
self.items.shuffle()
self.make_chunks()
return idxs
def create_item(self, seq):
if seq>=self.n: raise IndexError
sl = self.last_len if seq//self.bs==self.n_batches-1 else self.seq_len
st = (seq%self.bs)*self.bl + (seq//self.bs)*self.seq_len
txt = self.chunks[st : st+sl+1]
return LMTensorText(txt[:-1]),txt[1:]
@delegates(TfmdDL.new)
def new(self, dataset=None, seq_len=None, **kwargs):
lens = self.lens.coll if dataset is None else None
seq_len = self.seq_len if seq_len is None else seq_len
return super().new(dataset=dataset, lens=lens, seq_len=seq_len, **kwargs)
# Cell
@typedispatch
def show_batch(x: TensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
if trunc_at is not None: samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
ctxs = show_batch[object](x, y, samples, max_n=max_n, ctxs=ctxs, **kwargs)
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def show_batch(x: LMTensorText, y, samples, ctxs=None, max_n=10, trunc_at=150, **kwargs):
samples = L((s[0].truncate(trunc_at), s[1].truncate(trunc_at)) for s in samples)
return show_batch[TensorText](x, None, samples, ctxs=ctxs, max_n=max_n, trunc_at=None, **kwargs)
# Cell
class Pad_Input(ItemTransform):
def encodes(self,samples, pad_idx=1, pad_fields=0, pad_first=False, backwards=False):
"Function that collect `samples` and adds padding"
self.pad_idx = pad_idx
pad_fields = L(pad_fields)
max_len_l = pad_fields.map(lambda f: max([len(s[f]) for s in samples]))
if backwards: pad_first = not pad_first
def _f(field_idx, x):
if field_idx not in pad_fields: return x
idx = pad_fields.items.index(field_idx) #TODO: remove items if L.index is fixed
sl = slice(-len(x), sys.maxsize) if pad_first else slice(0, len(x))
pad = x.new_zeros(max_len_l[idx]-x.shape[0])+pad_idx
x1 = torch.cat([pad, x] if pad_first else [x, pad])
if backwards: x1 = x1.flip(0)
return retain_type(x1, x)
return [tuple(map(lambda idxx: _f(*idxx), enumerate(s))) for s in samples]
def decodes(self, o:TensorText):
pad_idx = self.pad_idx if hasattr(self,'pad_idx') else 1
return o[o != pad_idx]
pad_input=Pad_Input()
# Cell
def pad_chunk(x,pad_idx=1, pad_first=True, seq_len=72, pad_len=10):
"Pad `x` by adding padding by chunks of size `seq_len`"
l = pad_len - x.shape[0]
pad_chunk = x.new_zeros((l//seq_len) * seq_len) + pad_idx
pad_res = x.new_zeros(l % seq_len) + pad_idx
x1 = torch.cat([pad_chunk, x, pad_res]) if pad_first else torch.cat([x, pad_chunk, pad_res])
return retain_type(x1, x)
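# Worked example: with pad_len=7 and a length-3 input, l=4 tokens of padding are needed; one
# full seq_len=3 chunk goes in front (pad_first=True) and the 1-token remainder always goes
# at the end:
#   pad_chunk(tensor([5,6,7]), pad_idx=1, pad_first=True, seq_len=3, pad_len=7)
#   # -> tensor([1, 1, 1, 5, 6, 7, 1])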
# Cell
@delegates(pad_chunk)
def pad_input_chunk(samples, n_inp=1,**kwargs):
"Pad `samples` by adding padding by chunks of size `seq_len`"
max_len = max([len(s[n]) for s in samples for n in range(n_inp)])
padeds = [[pad_chunk(s[n],pad_len=max_len,**kwargs) for n in range(n_inp) ] for s in samples]
return [(*p, *s[n_inp:]) for p,s in zip(padeds,samples)]
# Cell
class Pad_Chunk(DisplayedTransform):
"Pad `samples` by adding padding by chunks of size `seq_len`"
def __init__(self, pad_idx=1, pad_first=True, seq_len=72,decode=True,**kwargs):
        store_attr('pad_idx, pad_first, seq_len, decode')
super().__init__(**kwargs)
def before_call(self, b):
"Set `self.max_len` before encodes"
self.max_len = max([x.shape[0] for xs in b for x in xs if isinstance(x,TensorText)])
def __call__(self, b, **kwargs):
self.before_call(b)
return super().__call__(tuple(b), **kwargs)
def encodes(self, x:TensorText):
return pad_chunk(x,pad_idx=self.pad_idx, pad_first=self.pad_first, seq_len=self.seq_len, pad_len=self.max_len)
def decodes(self, o:TensorText):
return o[o != self.pad_idx] if self.decode else o
# Cell
def _default_sort(x): return len(x[0])
@delegates(TfmdDL)
class SortedDL(TfmdDL):
"A `DataLoader` that goes throught the item in the order given by `sort_func`"
def __init__(self, dataset, sort_func=None, res=None, **kwargs):
super().__init__(dataset, **kwargs)
self.sort_func = _default_sort if sort_func is None else sort_func
if res is None and self.sort_func == _default_sort: res = _get_lengths(dataset)
self.res = [self.sort_func(self.do_item(i)) for i in range_of(self.dataset)] if res is None else res
if len(self.res) > 0: self.idx_max = np.argmax(self.res)
def get_idxs(self):
idxs = super().get_idxs()
if self.shuffle: return idxs
return sorted(idxs, key=lambda i: self.res[i], reverse=True)
def shuffle_fn(self,idxs):
idxs = np.random.permutation(len(self.dataset))
idx_max = np.where(idxs==self.idx_max)[0][0]
idxs[0],idxs[idx_max] = idxs[idx_max],idxs[0]
sz = self.bs*50
chunks = [idxs[i:i+sz] for i in range(0, len(idxs), sz)]
chunks = [sorted(s, key=lambda i: self.res[i], reverse=True) for s in chunks]
sort_idx = np.concatenate(chunks)
sz = self.bs
batches = [sort_idx[i:i+sz] for i in range(0, len(sort_idx), sz)]
        sort_idx = np.concatenate(np.random.permutation(batches[1:-1])) if len(batches) > 2 else np.array([],dtype=int)
sort_idx = np.concatenate((batches[0], sort_idx) if len(batches)==1 else (batches[0], sort_idx, batches[-1]))
return iter(sort_idx)
@delegates(TfmdDL.new)
def new(self, dataset=None, **kwargs):
if 'val_res' in kwargs and kwargs['val_res'] is not None: res = kwargs['val_res']
else: res = self.res if dataset is None else None
return super().new(dataset=dataset, res=res, **kwargs)
# Cell
class TextBlock(TransformBlock):
"A `TransformBlock` for texts"
@delegates(Numericalize.__init__)
def __init__(self, tok_tfm, vocab=None, is_lm=False, seq_len=72, backwards=False, **kwargs):
type_tfms = [tok_tfm, Numericalize(vocab, **kwargs)]
if backwards: type_tfms += [reverse_text]
return super().__init__(type_tfms=type_tfms,
dl_type=LMDataLoader if is_lm else SortedDL,
dls_kwargs={'seq_len': seq_len} if is_lm else {'before_batch': Pad_Chunk(seq_len=seq_len)})
@classmethod
@delegates(Tokenizer.from_df, keep=True)
def from_df(cls, text_cols, vocab=None, is_lm=False, seq_len=72, backwards=False, min_freq=3, max_vocab=60000, **kwargs):
"Build a `TextBlock` from a dataframe using `text_cols`"
return cls(Tokenizer.from_df(text_cols, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
backwards=backwards, min_freq=min_freq, max_vocab=max_vocab)
@classmethod
@delegates(Tokenizer.from_folder, keep=True)
def from_folder(cls, path, vocab=None, is_lm=False, seq_len=72, backwards=False, min_freq=3, max_vocab=60000, **kwargs):
"Build a `TextBlock` from a `path`"
return cls(Tokenizer.from_folder(path, **kwargs), vocab=vocab, is_lm=is_lm, seq_len=seq_len,
backwards=backwards, min_freq=min_freq, max_vocab=max_vocab)
# Cell
class TextDataLoaders(DataLoaders):
"Basic wrapper around several `DataLoader`s with factory methods for NLP problems"
@classmethod
@delegates(DataLoaders.from_dblock)
def from_folder(cls, path, train='train', valid='valid', valid_pct=None, seed=None, vocab=None, text_vocab=None, is_lm=False,
tok_tfm=None, seq_len=72, backwards=False, **kwargs):
"Create from imagenet style dataset in `path` with `train` and `valid` subfolders (or provide `valid_pct`)"
splitter = GrandparentSplitter(train_name=train, valid_name=valid) if valid_pct is None else RandomSplitter(valid_pct, seed=seed)
blocks = [TextBlock.from_folder(path, text_vocab, is_lm, seq_len, backwards) if tok_tfm is None else TextBlock(tok_tfm, text_vocab, is_lm, seq_len, backwards)]
if not is_lm: blocks.append(CategoryBlock(vocab=vocab))
get_items = partial(get_text_files, folders=[train,valid]) if valid_pct is None else get_text_files
dblock = DataBlock(blocks=blocks,
get_items=get_items,
splitter=splitter,
get_y=None if is_lm else parent_label)
return cls.from_dblock(dblock, path, path=path, seq_len=seq_len, **kwargs)
@classmethod
@delegates(DataLoaders.from_dblock)
def from_df(cls, df, path='.', valid_pct=0.2, seed=None, text_col=0, label_col=1, label_delim=None, y_block=None,
text_vocab=None, is_lm=False, valid_col=None, tok_tfm=None, seq_len=72, backwards=False, **kwargs):
"Create from `df` in `path` with `valid_pct`"
blocks = [TextBlock.from_df(text_col, text_vocab, is_lm, seq_len, backwards) if tok_tfm is None else TextBlock(tok_tfm, text_vocab, is_lm, seq_len, backwards)]
if y_block is None and not is_lm:
blocks.append(MultiCategoryBlock if is_listy(label_col) and len(label_col) > 1 else CategoryBlock)
if y_block is not None and not is_lm: blocks += (y_block if is_listy(y_block) else [y_block])
splitter = RandomSplitter(valid_pct, seed=seed) if valid_col is None else ColSplitter(valid_col)
dblock = DataBlock(blocks=blocks,
get_x=ColReader("text"),
get_y=None if is_lm else ColReader(label_col, label_delim=label_delim),
splitter=splitter)
return cls.from_dblock(dblock, df, path=path, seq_len=seq_len, **kwargs)
@classmethod
def from_csv(cls, path, csv_fname='labels.csv', header='infer', delimiter=None, **kwargs):
"Create from `csv` file in `path/csv_fname`"
df = pd.read_csv(Path(path)/csv_fname, header=header, delimiter=delimiter)
return cls.from_df(df, path=path, **kwargs)
TextDataLoaders.from_csv = delegates(to=TextDataLoaders.from_df)(TextDataLoaders.from_csv)
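# Usage sketch (illustrative, not part of the original file): assuming a dataframe `df`
# with "text" and "label" columns,
#   dls_clas = TextDataLoaders.from_df(df, text_col='text', label_col='label', valid_pct=0.2)
#   dls_lm   = TextDataLoaders.from_df(df, text_col='text', is_lm=True)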
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/text/data.py
|
data.py
|
from ..basics import *
from ..callback.all import *
from .core import *
from .data import *
from .models import *
from .learner import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/text/all.py
|
all.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/37_text.learner.ipynb (unless otherwise specified).
__all__ = ['match_embeds', 'load_ignore_keys', 'clean_raw_keys', 'load_model_text', 'TextLearner', 'decode_spec_tokens',
'LMLearner', 'language_model_learner', 'text_classifier_learner']
# Cell
from ..basics import *
from .core import *
from .data import *
from .models.core import *
from .models.awdlstm import *
from ..callback.rnn import *
from ..callback.progress import *
# Cell
def match_embeds(old_wgts, old_vocab, new_vocab):
"Convert the embedding in `old_wgts` to go from `old_vocab` to `new_vocab`."
bias, wgts = old_wgts.get('1.decoder.bias', None), old_wgts['0.encoder.weight']
wgts_m = wgts.mean(0)
new_wgts = wgts.new_zeros((len(new_vocab),wgts.size(1)))
if bias is not None:
bias_m = bias.mean(0)
new_bias = bias.new_zeros((len(new_vocab),))
old_o2i = old_vocab.o2i if hasattr(old_vocab, 'o2i') else {w:i for i,w in enumerate(old_vocab)}
for i,w in enumerate(new_vocab):
idx = old_o2i.get(w, -1)
new_wgts[i] = wgts[idx] if idx>=0 else wgts_m
if bias is not None: new_bias[i] = bias[idx] if idx>=0 else bias_m
old_wgts['0.encoder.weight'] = new_wgts
if '0.encoder_dp.emb.weight' in old_wgts: old_wgts['0.encoder_dp.emb.weight'] = new_wgts.clone()
old_wgts['1.decoder.weight'] = new_wgts.clone()
if bias is not None: old_wgts['1.decoder.bias'] = new_bias
return old_wgts
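# Usage sketch (illustrative): `load_pretrained` below calls this to remap pretrained
# language-model weights onto a new corpus vocabulary; words missing from the old vocab
# fall back to the mean embedding, e.g.
#   wgts = torch.load('lm_wgts.pth', map_location='cpu')   # hypothetical weights file
#   wgts = match_embeds(wgts, old_vocab, new_vocab)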
# Cell
def _get_text_vocab(dls):
vocab = dls.vocab
if isinstance(vocab, L): vocab = vocab[0]
return vocab
# Cell
def load_ignore_keys(model, wgts):
"Load `wgts` in `model` ignoring the names of the keys, just taking parameters in order"
sd = model.state_dict()
for k1,k2 in zip(sd.keys(), wgts.keys()): sd[k1].data = wgts[k2].data.clone()
return model.load_state_dict(sd)
# Cell
def _rm_module(n):
t = n.split('.')
for i in range(len(t)-1, -1, -1):
if t[i] == 'module':
t.pop(i)
break
return '.'.join(t)
# Cell
#For previous versions compatibility, remove for release
def clean_raw_keys(wgts):
keys = list(wgts.keys())
for k in keys:
t = k.split('.module')
if f'{_rm_module(k)}_raw' in keys: del wgts[k]
return wgts
# Cell
#For previous versions compatibility, remove for release
def load_model_text(file, model, opt, with_opt=None, device=None, strict=True):
"Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
distrib_barrier()
if isinstance(device, int): device = torch.device('cuda', device)
elif device is None: device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = set(state)=={'model', 'opt'}
model_state = state['model'] if hasopt else state
get_model(model).load_state_dict(clean_raw_keys(model_state), strict=strict)
if hasopt and ifnone(with_opt,True):
try: opt.load_state_dict(state['opt'])
except:
if with_opt: warn("Could not load the optimizer state.")
elif with_opt: warn("Saved filed doesn't contain an optimizer state.")
# Cell
@delegates(Learner.__init__)
class TextLearner(Learner):
"Basic class for a `Learner` in NLP."
def __init__(self, dls, model, alpha=2., beta=1., moms=(0.8,0.7,0.8), **kwargs):
super().__init__(dls, model, moms=moms, **kwargs)
self.add_cbs(rnn_cbs())
def save_encoder(self, file):
"Save the encoder to `file` in the model directory"
if rank_distrib(): return # don't save if child proc
encoder = get_model(self.model)[0]
if hasattr(encoder, 'module'): encoder = encoder.module
torch.save(encoder.state_dict(), join_path_file(file, self.path/self.model_dir, ext='.pth'))
def load_encoder(self, file, device=None):
"Load the encoder `file` from the model directory, optionally ensuring it's on `device`"
encoder = get_model(self.model)[0]
if device is None: device = self.dls.device
if hasattr(encoder, 'module'): encoder = encoder.module
distrib_barrier()
wgts = torch.load(join_path_file(file,self.path/self.model_dir, ext='.pth'), map_location=device)
encoder.load_state_dict(clean_raw_keys(wgts))
self.freeze()
return self
def load_pretrained(self, wgts_fname, vocab_fname, model=None):
"Load a pretrained model and adapt it to the data vocabulary."
old_vocab = load_pickle(vocab_fname)
new_vocab = _get_text_vocab(self.dls)
distrib_barrier()
wgts = torch.load(wgts_fname, map_location = lambda storage,loc: storage)
if 'model' in wgts: wgts = wgts['model'] #Just in case the pretrained model was saved with an optimizer
wgts = match_embeds(wgts, old_vocab, new_vocab)
load_ignore_keys(self.model if model is None else model, clean_raw_keys(wgts))
self.freeze()
return self
#For previous versions compatibility. Remove at release
@delegates(load_model_text)
def load(self, file, with_opt=None, device=None, **kwargs):
if device is None: device = self.dls.device
if self.opt is None: self.create_opt()
file = join_path_file(file, self.path/self.model_dir, ext='.pth')
load_model_text(file, self.model, self.opt, device=device, **kwargs)
return self
# Cell
def decode_spec_tokens(tokens):
"Decode the special tokens in `tokens`"
new_toks,rule,arg = [],None,None
for t in tokens:
if t in [TK_MAJ, TK_UP, TK_REP, TK_WREP]: rule = t
elif rule is None: new_toks.append(t)
elif rule == TK_MAJ:
new_toks.append(t[:1].upper() + t[1:].lower())
rule = None
elif rule == TK_UP:
new_toks.append(t.upper())
rule = None
elif arg is None:
try: arg = int(t)
except: rule = None
else:
if rule == TK_REP: new_toks.append(t * arg)
else: new_toks += [t] * arg
return new_toks
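# Illustrative example (not part of the original source):
#   decode_spec_tokens(['xxmaj', 'hello', 'xxup', 'world', 'xxrep', '3', '!'])
#   -> ['Hello', 'WORLD', '!!!']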
# Cell
class LMLearner(TextLearner):
"Add functionality to `TextLearner` when dealing with a language model"
def predict(self, text, n_words=1, no_unk=True, temperature=1., min_p=None, no_bar=False,
decoder=decode_spec_tokens, only_last_word=False):
"Return `text` and the `n_words` that come after"
self.model.reset()
idxs = idxs_all = self.dls.test_dl([text]).items[0].to(self.dls.device)
if no_unk: unk_idx = self.dls.vocab.index(UNK)
for _ in (range(n_words) if no_bar else progress_bar(range(n_words), leave=False)):
with self.no_bar(): preds,_ = self.get_preds(dl=[(idxs[None],)])
res = preds[0][-1]
if no_unk: res[unk_idx] = 0.
if min_p is not None:
if (res >= min_p).float().sum() == 0:
warn(f"There is no item with probability >= {min_p}, try a lower value.")
else: res[res < min_p] = 0.
if temperature != 1.: res.pow_(1 / temperature)
idx = torch.multinomial(res, 1).item()
idxs = idxs_all = torch.cat([idxs_all, idxs.new([idx])])
if only_last_word: idxs = idxs[-1][None]
num = self.dls.train_ds.numericalize
tokens = [num.vocab[i] for i in idxs_all if num.vocab[i] not in [BOS, PAD]]
sep = self.dls.train_ds.tokenizer.sep
return sep.join(decoder(tokens))
@delegates(Learner.get_preds)
def get_preds(self, concat_dim=1, **kwargs): return super().get_preds(concat_dim=1, **kwargs)
# Cell
from .models.core import _model_meta
# Cell
@delegates(Learner.__init__)
def language_model_learner(dls, arch, config=None, drop_mult=1., backwards=False, pretrained=True, pretrained_fnames=None, **kwargs):
"Create a `Learner` with a language model from `dls` and `arch`."
vocab = _get_text_vocab(dls)
model = get_language_model(arch, len(vocab), config=config, drop_mult=drop_mult)
meta = _model_meta[arch]
learn = LMLearner(dls, model, loss_func=CrossEntropyLossFlat(), splitter=meta['split_lm'], **kwargs)
url = 'url_bwd' if backwards else 'url'
if pretrained or pretrained_fnames:
if pretrained_fnames is not None:
fnames = [learn.path/learn.model_dir/f'{fn}.{ext}' for fn,ext in zip(pretrained_fnames, ['pth', 'pkl'])]
else:
if url not in meta:
warn("There are no pretrained weights for that architecture yet!")
return learn
model_path = untar_data(meta[url] , c_key='model')
try: fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
except IndexError: print(f'The model in {model_path} is incomplete, download again'); raise
learn = learn.load_pretrained(*fnames)
return learn
# Cell
@delegates(Learner.__init__)
def text_classifier_learner(dls, arch, seq_len=72, config=None, backwards=False, pretrained=True, drop_mult=0.5, n_out=None,
lin_ftrs=None, ps=None, max_len=72*20, y_range=None, **kwargs):
"Create a `Learner` with a text classifier from `dls` and `arch`."
vocab = _get_text_vocab(dls)
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
model = get_text_classifier(arch, len(vocab), n_out, seq_len=seq_len, config=config, y_range=y_range,
drop_mult=drop_mult, lin_ftrs=lin_ftrs, ps=ps, max_len=max_len)
meta = _model_meta[arch]
learn = TextLearner(dls, model, splitter=meta['split_clas'], **kwargs)
url = 'url_bwd' if backwards else 'url'
if pretrained:
if url not in meta:
warn("There are no pretrained weights for that architecture yet!")
return learn
model_path = untar_data(meta[url], c_key='model')
try: fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]
except IndexError: print(f'The model in {model_path} is incomplete, download again'); raise
learn = learn.load_pretrained(*fnames, model=learn.model[0])
learn.freeze()
return learn
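# Usage sketch (illustrative, not part of the original file): with `dls_lm` / `dls_clas`
# built by the factories in text.data,
#   learn_lm   = language_model_learner(dls_lm, AWD_LSTM, drop_mult=0.3, metrics=accuracy)
#   learn_clas = text_classifier_learner(dls_clas, AWD_LSTM, drop_mult=0.5, metrics=accuracy)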
# Cell
@typedispatch
def show_results(x: LMTensorText, y, samples, outs, ctxs=None, max_n=10, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
for i,l in enumerate(['input', 'target']):
ctxs = [b.show(ctx=c, label=l, **kwargs) for b,c,_ in zip(samples.itemgot(i),ctxs,range(max_n))]
ctxs = [b.show(ctx=c, label='pred', **kwargs) for b,c,_ in zip(outs.itemgot(0),ctxs,range(max_n))]
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def show_results(x: TensorText, y, samples, outs, ctxs=None, max_n=10, trunc_at=150, **kwargs):
if ctxs is None: ctxs = get_empty_df(min(len(samples), max_n))
samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
ctxs = show_results[object](x, y, samples, outs, ctxs=ctxs, max_n=max_n, **kwargs)
display_df(pd.DataFrame(ctxs))
return ctxs
# Cell
@typedispatch
def plot_top_losses(x: TensorText, y:TensorCategory, samples, outs, raws, losses, trunc_at=150, **kwargs):
rows = get_empty_df(len(samples))
samples = L((s[0].truncate(trunc_at),*s[1:]) for s in samples)
for i,l in enumerate(['input', 'target']):
rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(samples.itemgot(i),rows)]
outs = L(o + (TitledFloat(r.max().item()), TitledFloat(l.item())) for o,r,l in zip(outs, raws, losses))
for i,l in enumerate(['predicted', 'probability', 'loss']):
rows = [b.show(ctx=c, label=l, **kwargs) for b,c in zip(outs.itemgot(i),rows)]
display_df(pd.DataFrame(rows))
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/text/learner.py
|
learner.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/30_text.core.ipynb (unless otherwise specified).
__all__ = ['UNK', 'PAD', 'BOS', 'EOS', 'FLD', 'TK_REP', 'TK_WREP', 'TK_UP', 'TK_MAJ', 'spec_add_spaces',
'rm_useless_spaces', 'replace_rep', 'replace_wrep', 'fix_html', 'replace_all_caps', 'replace_maj',
'lowercase', 'replace_space', 'BaseTokenizer', 'SpacyTokenizer', 'WordTokenizer', 'TokenizeWithRules',
'tokenize1', 'parallel_tokenize', 'fn_counter_pkl', 'fn_lengths_pkl', 'tokenize_folder', 'tokenize_files',
'tokenize_texts', 'tokenize_df', 'tokenize_csv', 'load_tokenized_csv', 'Tokenizer', 'eu_langs',
'SentencePieceTokenizer', 'SubwordTokenizer']
# Cell
from ..torch_basics import *
from ..data.all import *
# Cell
import spacy,html
from spacy.symbols import ORTH
# Cell
#special tokens
UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ = "xxunk xxpad xxbos xxeos xxfld xxrep xxwrep xxup xxmaj".split()
# Cell
#nbdev_comment _all_ = ["UNK", "PAD", "BOS", "EOS", "FLD", "TK_REP", "TK_WREP", "TK_UP", "TK_MAJ"]
# Cell
_re_spec = re.compile(r'([/#\\])')
def spec_add_spaces(t):
"Add spaces around / and #"
return _re_spec.sub(r' \1 ', t)
# Cell
_re_space = re.compile(' {2,}')
def rm_useless_spaces(t):
"Remove multiple spaces"
return _re_space.sub(' ', t)
# Cell
_re_rep = re.compile(r'(\S)(\1{2,})')
def replace_rep(t):
"Replace repetitions at the character level: cccc -- TK_REP 4 c"
def _replace_rep(m):
c,cc = m.groups()
return f' {TK_REP} {len(cc)+1} {c} '
return _re_rep.sub(_replace_rep, t)
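# Illustrative example (spacing shown literally): replace_rep('cccc') -> ' xxrep 4 c '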
# Cell
_re_wrep = re.compile(r'(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)')
# Cell
def replace_wrep(t):
"Replace word repetitions: word word word word -- TK_WREP 4 word"
def _replace_wrep(m):
c,cc,e = m.groups()
return f' {TK_WREP} {len(cc.split())+2} {c} {e}'
return _re_wrep.sub(_replace_wrep, t)
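# Illustrative example (up to trailing spacing): replace_wrep('word word word word') -> ' xxwrep 4 word '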
# Cell
def fix_html(x):
"Various messy things we've seen in documents"
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace('nbsp;', ' ').replace(
'#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace('<br />', "\n").replace(
'\\"', '"').replace('<unk>',UNK).replace(' @.@ ','.').replace(' @-@ ','-').replace('...',' …')
return html.unescape(x)
# Cell
_re_all_caps = re.compile(r'(\s|^)([A-Z]+[^a-z\s]*)(?=(\s|$))')
# Cell
def replace_all_caps(t):
"Replace tokens in ALL CAPS by their lower version and add `TK_UP` before."
def _replace_all_caps(m):
tok = f'{TK_UP} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_all_caps.sub(_replace_all_caps, t)
# Cell
_re_maj = re.compile(r'(\s|^)([A-Z][^A-Z\s]*)(?=(\s|$))')
# Cell
def replace_maj(t):
"Replace tokens in Sentence Case by their lower version and add `TK_MAJ` before."
def _replace_maj(m):
tok = f'{TK_MAJ} ' if len(m.groups()[1]) > 1 else ''
return f"{m.groups()[0]}{tok}{m.groups()[1].lower()}"
return _re_maj.sub(_replace_maj, t)
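# Illustrative examples (up to exact spacing):
#   replace_all_caps('I AM SHOUTING') -> 'i xxup am xxup shouting'
#   replace_maj('Hello World')        -> 'xxmaj hello xxmaj world'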
# Cell
def lowercase(t, add_bos=True, add_eos=False):
"Converts `t` to lowercase"
return (f'{BOS} ' if add_bos else '') + t.lower().strip() + (f' {EOS}' if add_eos else '')
# Cell
def replace_space(t):
"Replace embedded spaces in a token with unicode line char to allow for split/join"
return t.replace(' ', '▁')
# Cell
defaults.text_spec_tok = [UNK, PAD, BOS, EOS, FLD, TK_REP, TK_WREP, TK_UP, TK_MAJ]
defaults.text_proc_rules = [fix_html, replace_rep, replace_wrep, spec_add_spaces, rm_useless_spaces,
replace_all_caps, replace_maj, lowercase]
defaults.text_postproc_rules = [replace_space]
# Cell
class BaseTokenizer():
"Basic tokenizer that just splits on spaces"
def __init__(self, split_char=' ', **kwargs): self.split_char=split_char
def __call__(self, items): return (t.split(self.split_char) for t in items)
# Cell
class SpacyTokenizer():
"Spacy tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, buf_sz=5000):
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
nlp = spacy.blank(lang, disable=["parser", "tagger", "ner"])
for w in self.special_toks: nlp.tokenizer.add_special_case(w, [{ORTH: w}])
self.pipe,self.buf_sz = nlp.pipe,buf_sz
def __call__(self, items):
return (L(doc).attrgot('text') for doc in self.pipe(map(str,items), batch_size=self.buf_sz))
# Cell
WordTokenizer = SpacyTokenizer
# Cell
class TokenizeWithRules:
"A wrapper around `tok` which applies `rules`, then tokenizes, then applies `post_rules`"
def __init__(self, tok, rules=None, post_rules=None):
self.rules = L(ifnone(rules, defaults.text_proc_rules))
self.post_f = compose(*L(ifnone(post_rules, defaults.text_postproc_rules)))
self.tok = tok
def __call__(self, batch):
return (L(o).map(self.post_f) for o in self.tok(maps(*self.rules, batch)))
# Cell
@delegates(TokenizeWithRules)
def tokenize1(text, tok, **kwargs):
"Call `TokenizeWithRules` with a single text"
return first(TokenizeWithRules(tok=tok, **kwargs)([text]))
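# Usage sketch (illustrative; exact output depends on the spaCy model and the rules in effect):
#   tokenize1("This movie was AMAZING!!!", tok=WordTokenizer())
#   # -> roughly ['xxbos', 'xxmaj', 'this', 'movie', 'was', 'xxup', 'amazing', 'xxrep', '3', '!']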
# Cell
def parallel_tokenize(items, tok=None, rules=None, n_workers=defaults.cpus, **kwargs):
"Calls optional `setup` on `tok` before launching `TokenizeWithRules` using `parallel_gen"
if tok is None: tok = WordTokenizer()
if hasattr(tok, 'setup'): tok.setup(items, rules)
return parallel_gen(TokenizeWithRules, items, tok=tok, rules=rules, n_workers=n_workers, **kwargs)
# Cell
fn_counter_pkl = 'counter.pkl'
fn_lengths_pkl = 'lengths.pkl'
# Cell
def _tokenize_files(func, files, path, output_dir=None, output_names=None, n_workers=defaults.cpus, rules=None, tok=None,
encoding='utf8', skip_if_exists=False):
"Tokenize text `files` in parallel using `n_workers`"
if tok is None: tok = WordTokenizer()
output_dir = Path(ifnone(output_dir, path.parent/f'{path.name}_tok'))
if skip_if_exists and output_dir.exists(): return output_dir
output_dir.mkdir(exist_ok=True)
if output_names is None: output_names = L(output_dir/f.relative_to(path) for f in files)
rules = partial(Path.read_text, encoding=encoding) + L(ifnone(rules, defaults.text_proc_rules.copy()))
lengths,counter = {},Counter()
for i,tok in parallel_tokenize(files, tok, rules, n_workers=n_workers):
out = func(i,output_dir)
out.mk_write(' '.join(tok))
lengths[str(files[i].relative_to(path))] = len(tok)
counter.update(tok)
save_pickle(output_dir/fn_lengths_pkl, lengths)
save_pickle(output_dir/fn_counter_pkl, counter)
return output_dir
# Cell
@delegates(_tokenize_files)
def tokenize_folder(path, extensions=None, folders=None, output_dir=None, skip_if_exists=True, **kwargs):
"Tokenize text files in `path` in parallel using `n_workers`"
path,extensions = Path(path),ifnone(extensions, ['.txt'])
files = get_files(path, extensions=extensions, recurse=True, folders=folders)
def _f(i,output_dir): return output_dir/files[i].relative_to(path)
return _tokenize_files(_f, files, path, skip_if_exists=skip_if_exists, **kwargs)
# Cell
@delegates(_tokenize_files)
def tokenize_files(files, path, output_dir, output_names=None, **kwargs):
"Tokenize text `files` in parallel using `n_workers`"
if output_names is None: output_names = L(output_dir/f.relative_to(path) for f in files)
def _f(i,output_dir): return output_dir/output_names[i]
return _tokenize_files(_f, files, path, output_dir=output_dir, **kwargs)
# Cell
def _join_texts(df, mark_fields=False):
"Join texts in row `idx` of `df`, marking each field with `FLD` if `mark_fields=True`"
text_col = (f'{FLD} {1} ' if mark_fields else '' ) + df.iloc[:,0].astype(str)
for i in range(1,len(df.columns)):
text_col += (f' {FLD} {i+1} ' if mark_fields else ' ') + df.iloc[:,i].astype(str)
return text_col.values
# Cell
def tokenize_texts(texts, n_workers=defaults.cpus, rules=None, tok=None):
"Tokenize `texts` in parallel using `n_workers`"
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
outputs = L(parallel_tokenize(texts, tok=tok, rules=rules, n_workers=n_workers)
).sorted().itemgot(1)
return outputs
# Cell
def tokenize_df(df, text_cols, n_workers=defaults.cpus, rules=None, mark_fields=None,
tok=None, tok_text_col="text"):
"Tokenize texts in `df[text_cols]` in parallel using `n_workers` and stores them in `df[tok_text_col]`"
text_cols = [df.columns[c] if isinstance(c, int) else c for c in L(text_cols)]
#mark_fields defaults to False if there is one column of texts, True if there are multiple
if mark_fields is None: mark_fields = len(text_cols)>1
rules = L(ifnone(rules, defaults.text_proc_rules.copy()))
texts = _join_texts(df[text_cols], mark_fields=mark_fields)
outputs = L(parallel_tokenize(texts, tok, rules, n_workers=n_workers)
).sorted().itemgot(1)
other_cols = df.columns[~df.columns.isin(text_cols)]
res = df[other_cols].copy()
res[tok_text_col] = outputs
res[f'{tok_text_col}_length'] = [len(o) for o in outputs]
return res,Counter(outputs.concat())
# Cell
def tokenize_csv(fname, text_cols, outname=None, n_workers=4, rules=None, mark_fields=None,
tok=None, header='infer', chunksize=50000):
"Tokenize texts in the `text_cols` of the csv `fname` in parallel using `n_workers`"
df = pd.read_csv(fname, header=header, chunksize=chunksize)
outname = Path(ifnone(outname, fname.parent/f'{fname.stem}_tok.csv'))
cnt = Counter()
for i,dfp in enumerate(df):
out,c = tokenize_df(dfp, text_cols, n_workers=n_workers, rules=rules,
mark_fields=mark_fields, tok=tok)
out.text = out.text.str.join(' ')
out.to_csv(outname, header=(None,header)[i==0], index=False, mode=('a','w')[i==0])
cnt.update(c)
save_pickle(outname.with_suffix('.pkl'), cnt)
# Cell
def load_tokenized_csv(fname):
"Utility function to quickly load a tokenized csv ans the corresponding counter"
fname = Path(fname)
out = pd.read_csv(fname)
for txt_col in out.columns[1:-1]:
out[txt_col] = tuple(out[txt_col].str.split(' '))
return out,load_pickle(fname.with_suffix('.pkl'))
# Cell
class Tokenizer(Transform):
"Provides a consistent `Transform` interface to tokenizers operating on `DataFrame`s and folders"
input_types = (str, list, L, tuple, Path)
def __init__(self, tok, rules=None, counter=None, lengths=None, mode=None, sep=' '):
if isinstance(tok,type): tok=tok()
store_attr('tok,counter,lengths,mode,sep')
self.rules = defaults.text_proc_rules if rules is None else rules
@classmethod
@delegates(tokenize_df, keep=True)
def from_df(cls, text_cols, tok=None, rules=None, sep=' ', **kwargs):
if tok is None: tok = WordTokenizer()
res = cls(tok, rules=rules, mode='df')
res.kwargs,res.train_setup = merge({'tok': tok}, kwargs),False
res.text_cols,res.sep = text_cols,sep
return res
@classmethod
@delegates(tokenize_folder, keep=True)
def from_folder(cls, path, tok=None, rules=None, **kwargs):
path = Path(path)
if tok is None: tok = WordTokenizer()
output_dir = tokenize_folder(path, tok=tok, rules=rules, **kwargs)
res = cls(tok, counter=load_pickle(output_dir/fn_counter_pkl),
lengths=load_pickle(output_dir/fn_lengths_pkl), rules=rules, mode='folder')
res.path,res.output_dir = path,output_dir
return res
def setups(self, dsets):
if not self.mode == 'df' or not isinstance(dsets.items, pd.DataFrame): return
dsets.items,count = tokenize_df(dsets.items, self.text_cols, rules=self.rules, **self.kwargs)
if self.counter is None: self.counter = count
return dsets
def encodes(self, o:Path):
if self.mode=='folder' and str(o).startswith(str(self.path)):
tok = self.output_dir/o.relative_to(self.path)
return L(tok.read_text().split(' '))
else: return self._tokenize1(o.read_text())
def encodes(self, o:str): return self._tokenize1(o)
def _tokenize1(self, o): return first(self.tok([compose(*self.rules)(o)]))
def get_lengths(self, items):
if self.lengths is None: return None
if self.mode == 'df':
            if isinstance(items, pd.DataFrame) and 'text_length' in items.columns: return items['text_length'].values
if self.mode == 'folder':
try:
res = [self.lengths[str(Path(i).relative_to(self.path))] for i in items]
if len(res) == len(items): return res
except: return None
def decodes(self, o): return TitledStr(self.sep.join(o))
# Cell
eu_langs = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu",
"it","lt","lv","mt","nl","pl","pt","ro","sk","sl","sv"] # all European langs
# Cell
class SentencePieceTokenizer():#TODO: pass the special tokens symbol to sp
"SentencePiece tokenizer for `lang`"
def __init__(self, lang='en', special_toks=None, sp_model=None, vocab_sz=None, max_vocab_sz=30000,
model_type='unigram', char_coverage=None, cache_dir='tmp'):
try: from sentencepiece import SentencePieceTrainer,SentencePieceProcessor
except ImportError:
raise Exception('sentencepiece module is missing: run `pip install sentencepiece!=0.1.90,!=0.1.91`')
self.sp_model,self.cache_dir = sp_model,Path(cache_dir)
self.vocab_sz,self.max_vocab_sz,self.model_type = vocab_sz,max_vocab_sz,model_type
self.char_coverage = ifnone(char_coverage, 0.99999 if lang in eu_langs else 0.9998)
self.special_toks = ifnone(special_toks, defaults.text_spec_tok)
if sp_model is None: self.tok = None
else:
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
os.makedirs(self.cache_dir, exist_ok=True)
def _get_vocab_sz(self, raw_text_path):
cnt = Counter()
with open(raw_text_path, 'r') as f:
for line in f.readlines():
cnt.update(line.split())
if len(cnt)//4 > self.max_vocab_sz: return self.max_vocab_sz
res = len(cnt)//4
while res%8 != 0: res+=1
return max(res,29)
def train(self, raw_text_path):
"Train a sentencepiece tokenizer on `texts` and save it in `path/tmp_dir`"
from sentencepiece import SentencePieceTrainer
vocab_sz = self._get_vocab_sz(raw_text_path) if self.vocab_sz is None else self.vocab_sz
spec_tokens = ['\u2581'+s for s in self.special_toks]
SentencePieceTrainer.Train(" ".join([
f"--input={raw_text_path} --vocab_size={vocab_sz} --model_prefix={self.cache_dir/'spm'}",
f"--character_coverage={self.char_coverage} --model_type={self.model_type}",
f"--unk_id={len(spec_tokens)} --pad_id=-1 --bos_id=-1 --eos_id=-1 --minloglevel=2",
f"--user_defined_symbols={','.join(spec_tokens)} --hard_vocab_limit=false"]))
raw_text_path.unlink()
return self.cache_dir/'spm.model'
def setup(self, items, rules=None):
from sentencepiece import SentencePieceProcessor
if rules is None: rules = []
if self.tok is not None: return {'sp_model': self.sp_model}
raw_text_path = self.cache_dir/'texts.out'
with open(raw_text_path, 'w') as f:
for t in progress_bar(maps(*rules, items), total=len(items), leave=False):
f.write(f'{t}\n')
sp_model = self.train(raw_text_path)
self.tok = SentencePieceProcessor()
self.tok.Load(str(sp_model))
return {'sp_model': sp_model}
def __call__(self, items):
if self.tok is None: self.setup(items)
for t in items: yield self.tok.EncodeAsPieces(t)
# Cell
SubwordTokenizer = SentencePieceTokenizer
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/text/core.py
|
core.py
|
from .all import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/text/__init__.py
|
__init__.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/36_text.models.qrnn.ipynb (unless otherwise specified).
__all__ = ['load_cpp', 'forget_mult_cuda', 'bwd_forget_mult_cuda', 'dispatch_cuda', 'forget_mult_CPU', 'ForgetMultGPU',
'QRNNLayer', 'QRNN']
# Cell
from ...data.all import *
from ..core import *
from .awdlstm import dropout_mask
# Cell
from torch.utils import cpp_extension
from torch.autograd import Function
# Cell
def load_cpp(name, files, path):
os.makedirs(Config().model/'qrnn', exist_ok=True)
return cpp_extension.load(name=name, sources=[path/f for f in files], build_directory=Config().model/'qrnn')
# Cell
class _LazyBuiltModule():
"A module with a CPP extension that builds itself at first use"
def __init__(self, name, files): self.name,self.files,self.mod = name,files,None
def _build(self):
self.mod = load_cpp(name=self.name, files=self.files, path=Path(__file__).parent)
def forward(self, *args, **kwargs):
if self.mod is None: self._build()
return self.mod.forward(*args, **kwargs)
def backward(self, *args, **kwargs):
if self.mod is None: self._build()
return self.mod.backward(*args, **kwargs)
# Cell
forget_mult_cuda = _LazyBuiltModule('forget_mult_cuda', ['forget_mult_cuda.cpp', 'forget_mult_cuda_kernel.cu'])
bwd_forget_mult_cuda = _LazyBuiltModule('bwd_forget_mult_cuda', ['bwd_forget_mult_cuda.cpp', 'bwd_forget_mult_cuda_kernel.cu'])
# Cell
def dispatch_cuda(cuda_class, cpu_func, x):
"Depending on `x.device` uses `cpu_func` or `cuda_class.apply`"
return cuda_class.apply if x.device.type == 'cuda' else cpu_func
# Cell
def forget_mult_CPU(x, f, first_h=None, batch_first=True, backward=False):
"ForgetMult gate applied to `x` and `f` on the CPU."
result = []
dim = (1 if batch_first else 0)
forgets = f.split(1, dim=dim)
inputs = x.split(1, dim=dim)
prev_h = None if first_h is None else first_h.unsqueeze(dim)
idx_range = range(len(inputs)-1,-1,-1) if backward else range(len(inputs))
for i in idx_range:
prev_h = inputs[i] * forgets[i] if prev_h is None else inputs[i] * forgets[i] + (1-forgets[i]) * prev_h
if backward: result.insert(0, prev_h)
else: result.append(prev_h)
return torch.cat(result, dim=dim)
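# For reference, the recurrence implemented here (and by the CUDA kernels wrapped below) is,
# per timestep: h_t = f_t * x_t + (1 - f_t) * h_{t-1}, with h_0 taken from `first_h` when given.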
# Cell
class ForgetMultGPU(Function):
"Wrapper around the CUDA kernels for the ForgetMult gate."
@staticmethod
def forward(ctx, x, f, first_h=None, batch_first=True, backward=False):
ind = -1 if backward else 0
(i,j) = (0,1) if batch_first else (1,0)
output = x.new_zeros(x.shape[0]+i, x.shape[1]+j, x.shape[2])
if first_h is not None:
if batch_first: output[:, ind] = first_h
else: output[ind] = first_h
else: output.zero_()
ctx.forget_mult = bwd_forget_mult_cuda if backward else forget_mult_cuda
output = ctx.forget_mult.forward(x, f, output, batch_first)
ctx.save_for_backward(x, f, first_h, output)
ctx.batch_first = batch_first
if backward: return output[:,:-1] if batch_first else output[:-1]
else: return output[:,1:] if batch_first else output[1:]
@staticmethod
def backward(ctx, grad_output):
x, f, first_h, output = ctx.saved_tensors
grad_x, grad_f, grad_h = ctx.forget_mult.backward(x, f, output, grad_output, ctx.batch_first)
return (grad_x, grad_f, (None if first_h is None else grad_h), None, None)
# Cell
class QRNNLayer(Module):
"Apply a single layer Quasi-Recurrent Neural Network (QRNN) to an input sequence."
def __init__(self, input_size, hidden_size=None, save_prev_x=False, zoneout=0, window=1,
output_gate=True, batch_first=True, backward=False):
assert window in [1, 2], "This QRNN implementation currently only handles convolutional window of size 1 or size 2"
self.save_prev_x,self.zoneout,self.window = save_prev_x,zoneout,window
self.output_gate,self.batch_first,self.backward = output_gate,batch_first,backward
hidden_size = ifnone(hidden_size, input_size)
#One large matmul with concat is faster than N small matmuls and no concat
mult = (3 if output_gate else 2)
self.linear = nn.Linear(window * input_size, mult * hidden_size)
self.prevX = None
def reset(self): self.prevX = None
def forward(self, inp, hid=None):
y = self.linear(self._get_source(inp))
if self.output_gate: z_gate,f_gate,o_gate = y.chunk(3, dim=2)
else: z_gate,f_gate = y.chunk(2, dim=2)
z_gate,f_gate = z_gate.tanh(),f_gate.sigmoid()
if self.zoneout and self.training:
f_gate *= dropout_mask(f_gate, f_gate.size(), self.zoneout).requires_grad_(False)
forget_mult = dispatch_cuda(ForgetMultGPU, partial(forget_mult_CPU), inp)
c_gate = forget_mult(z_gate, f_gate, hid, self.batch_first, self.backward)
output = torch.sigmoid(o_gate) * c_gate if self.output_gate else c_gate
if self.window > 1 and self.save_prev_x:
if self.backward: self.prevX = (inp[:, :1] if self.batch_first else inp[:1]) .detach()
else: self.prevX = (inp[:, -1:] if self.batch_first else inp[-1:]).detach()
idx = 0 if self.backward else -1
return output, (c_gate[:, idx] if self.batch_first else c_gate[idx])
def _get_source(self, inp):
if self.window == 1: return inp
dim = (1 if self.batch_first else 0)
if self.batch_first:
prev = torch.zeros_like(inp[:,:1]) if self.prevX is None else self.prevX
if prev.shape[0] < inp.shape[0]: prev = torch.cat([prev, torch.zeros_like(inp[prev.shape[0]:, :1])], dim=0)
if prev.shape[0] > inp.shape[0]: prev= prev[:inp.shape[0]]
else:
prev = torch.zeros_like(inp[:1]) if self.prevX is None else self.prevX
if prev.shape[1] < inp.shape[1]: prev = torch.cat([prev, torch.zeros_like(inp[:1, prev.shape[0]:])], dim=1)
if prev.shape[1] > inp.shape[1]: prev= prev[:,:inp.shape[1]]
inp_shift = [prev]
if self.backward: inp_shift.insert(0,inp[:,1:] if self.batch_first else inp[1:])
else: inp_shift.append(inp[:,:-1] if self.batch_first else inp[:-1])
inp_shift = torch.cat(inp_shift, dim)
return torch.cat([inp, inp_shift], 2)
# Cell
class QRNN(Module):
"Apply a multiple layer Quasi-Recurrent Neural Network (QRNN) to an input sequence."
def __init__(self, input_size, hidden_size, n_layers=1, batch_first=True, dropout=0,
bidirectional=False, save_prev_x=False, zoneout=0, window=None, output_gate=True):
assert not (save_prev_x and bidirectional), "Can't save the previous X with bidirectional."
kwargs = dict(batch_first=batch_first, zoneout=zoneout, output_gate=output_gate)
self.layers = nn.ModuleList([QRNNLayer(input_size if l == 0 else hidden_size, hidden_size, save_prev_x=save_prev_x,
window=((2 if l ==0 else 1) if window is None else window), **kwargs)
for l in range(n_layers)])
if bidirectional:
self.layers_bwd = nn.ModuleList([QRNNLayer(input_size if l == 0 else hidden_size, hidden_size,
backward=True, window=((2 if l ==0 else 1) if window is None else window),
**kwargs) for l in range(n_layers)])
self.n_layers,self.batch_first,self.dropout,self.bidirectional = n_layers,batch_first,dropout,bidirectional
def reset(self):
"Reset the hidden state."
for layer in self.layers: layer.reset()
if self.bidirectional:
for layer in self.layers_bwd: layer.reset()
def forward(self, inp, hid=None):
new_hid = []
if self.bidirectional: inp_bwd = inp.clone()
for i, layer in enumerate(self.layers):
inp, h = layer(inp, None if hid is None else hid[2*i if self.bidirectional else i])
new_hid.append(h)
if self.bidirectional:
inp_bwd, h_bwd = self.layers_bwd[i](inp_bwd, None if hid is None else hid[2*i+1])
new_hid.append(h_bwd)
if self.dropout != 0 and i < len(self.layers) - 1:
for o in ([inp, inp_bwd] if self.bidirectional else [inp]):
o = F.dropout(o, p=self.dropout, training=self.training, inplace=False)
if self.bidirectional: inp = torch.cat([inp, inp_bwd], dim=2)
return inp, torch.stack(new_hid, 0)
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/text/models/qrnn.py
|
qrnn.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/32_text.models.awdlstm.ipynb (unless otherwise specified).
__all__ = ['dropout_mask', 'RNNDropout', 'WeightDropout', 'EmbeddingDropout', 'AWD_LSTM', 'awd_lstm_lm_split',
'awd_lstm_lm_config', 'awd_lstm_clas_split', 'awd_lstm_clas_config', 'AWD_QRNN', 'awd_qrnn_lm_config',
'awd_qrnn_clas_config']
# Cell
from ...data.all import *
from ..core import *
# Cell
def dropout_mask(x, sz, p):
"Return a dropout mask of the same type as `x`, size `sz`, with probability `p` to cancel an element."
return x.new_empty(*sz).bernoulli_(1-p).div_(1-p)
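# Note (illustrative): each mask element is 0 with probability `p` and 1/(1-p) otherwise,
# so surviving activations are rescaled (e.g. by 2 when p=0.5) to keep the expected value unchanged.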
# Cell
class RNNDropout(Module):
"Dropout with probability `p` that is consistent on the seq_len dimension."
def __init__(self, p=0.5): self.p=p
def forward(self, x):
if not self.training or self.p == 0.: return x
return x * dropout_mask(x.data, (x.size(0), 1, *x.shape[2:]), self.p)
# Cell
class WeightDropout(Module):
"A module that wraps another layer in which some weights will be replaced by 0 during training."
def __init__(self, module, weight_p, layer_names='weight_hh_l0'):
self.module,self.weight_p,self.layer_names = module,weight_p,L(layer_names)
for layer in self.layer_names:
#Makes a copy of the weights of the selected layers.
w = getattr(self.module, layer)
delattr(self.module, layer)
self.register_parameter(f'{layer}_raw', nn.Parameter(w.data))
setattr(self.module, layer, w.clone())
if isinstance(self.module, (nn.RNNBase, nn.modules.rnn.RNNBase)):
self.module.flatten_parameters = self._do_nothing
def _setweights(self):
"Apply dropout to the raw weights."
for layer in self.layer_names:
raw_w = getattr(self, f'{layer}_raw')
if self.training: w = F.dropout(raw_w, p=self.weight_p)
else: w = raw_w.clone()
setattr(self.module, layer, w)
def forward(self, *args):
self._setweights()
with warnings.catch_warnings():
# To avoid the warning that comes because the weights aren't flattened.
warnings.simplefilter("ignore", category=UserWarning)
return self.module(*args)
def reset(self):
for layer in self.layer_names:
raw_w = getattr(self, f'{layer}_raw')
setattr(self.module, layer, raw_w.clone())
if hasattr(self.module, 'reset'): self.module.reset()
def _do_nothing(self): pass
# Cell
class EmbeddingDropout(Module):
"Apply dropout with probability `embed_p` to an embedding layer `emb`."
def __init__(self, emb, embed_p):
self.emb,self.embed_p = emb,embed_p
def forward(self, words, scale=None):
if self.training and self.embed_p != 0:
size = (self.emb.weight.size(0),1)
mask = dropout_mask(self.emb.weight.data, size, self.embed_p)
masked_embed = self.emb.weight * mask
else: masked_embed = self.emb.weight
if scale: masked_embed.mul_(scale)
return F.embedding(words, masked_embed, ifnone(self.emb.padding_idx, -1), self.emb.max_norm,
self.emb.norm_type, self.emb.scale_grad_by_freq, self.emb.sparse)
# Cell
class AWD_LSTM(Module):
"AWD-LSTM inspired by https://arxiv.org/abs/1708.02182"
initrange=0.1
def __init__(self, vocab_sz, emb_sz, n_hid, n_layers, pad_token=1, hidden_p=0.2, input_p=0.6, embed_p=0.1,
weight_p=0.5, bidir=False):
store_attr('emb_sz,n_hid,n_layers,pad_token')
self.bs = 1
self.n_dir = 2 if bidir else 1
self.encoder = nn.Embedding(vocab_sz, emb_sz, padding_idx=pad_token)
self.encoder_dp = EmbeddingDropout(self.encoder, embed_p)
self.rnns = nn.ModuleList([self._one_rnn(emb_sz if l == 0 else n_hid, (n_hid if l != n_layers - 1 else emb_sz)//self.n_dir,
bidir, weight_p, l) for l in range(n_layers)])
self.encoder.weight.data.uniform_(-self.initrange, self.initrange)
self.input_dp = RNNDropout(input_p)
self.hidden_dps = nn.ModuleList([RNNDropout(hidden_p) for l in range(n_layers)])
self.reset()
def forward(self, inp, from_embeds=False):
bs,sl = inp.shape[:2] if from_embeds else inp.shape
if bs!=self.bs: self._change_hidden(bs)
output = self.input_dp(inp if from_embeds else self.encoder_dp(inp))
new_hidden = []
for l, (rnn,hid_dp) in enumerate(zip(self.rnns, self.hidden_dps)):
output, new_h = rnn(output, self.hidden[l])
new_hidden.append(new_h)
if l != self.n_layers - 1: output = hid_dp(output)
self.hidden = to_detach(new_hidden, cpu=False, gather=False)
return output
def _change_hidden(self, bs):
self.hidden = [self._change_one_hidden(l, bs) for l in range(self.n_layers)]
self.bs = bs
def _one_rnn(self, n_in, n_out, bidir, weight_p, l):
"Return one of the inner rnn"
rnn = nn.LSTM(n_in, n_out, 1, batch_first=True, bidirectional=bidir)
return WeightDropout(rnn, weight_p)
def _one_hidden(self, l):
"Return one hidden state"
nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
return (one_param(self).new_zeros(self.n_dir, self.bs, nh), one_param(self).new_zeros(self.n_dir, self.bs, nh))
def _change_one_hidden(self, l, bs):
if self.bs < bs:
nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
return tuple(torch.cat([h, h.new_zeros(self.n_dir, bs-self.bs, nh)], dim=1) for h in self.hidden[l])
if self.bs > bs: return (self.hidden[l][0][:,:bs].contiguous(), self.hidden[l][1][:,:bs].contiguous())
return self.hidden[l]
def reset(self):
"Reset the hidden states"
[r.reset() for r in self.rnns if hasattr(r, 'reset')]
self.hidden = [self._one_hidden(l) for l in range(self.n_layers)]
# Cell
def awd_lstm_lm_split(model):
"Split a RNN `model` in groups for differential learning rates."
groups = [nn.Sequential(rnn, dp) for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)]
groups = L(groups + [nn.Sequential(model[0].encoder, model[0].encoder_dp, model[1])])
return groups.map(params)
# Cell
awd_lstm_lm_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.1,
hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)
# Cell
def awd_lstm_clas_split(model):
"Split a RNN `model` in groups for differential learning rates."
groups = [nn.Sequential(model[0].module.encoder, model[0].module.encoder_dp)]
groups += [nn.Sequential(rnn, dp) for rnn, dp in zip(model[0].module.rnns, model[0].module.hidden_dps)]
groups = L(groups + [model[1]])
return groups.map(params)
# Cell
awd_lstm_clas_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.4,
hidden_p=0.3, input_p=0.4, embed_p=0.05, weight_p=0.5)
# Cell
class AWD_QRNN(AWD_LSTM):
"Same as an AWD-LSTM, but using QRNNs instead of LSTMs"
def _one_rnn(self, n_in, n_out, bidir, weight_p, l):
from .qrnn import QRNN
rnn = QRNN(n_in, n_out, 1, save_prev_x=(not bidir), zoneout=0, window=2 if l == 0 else 1, output_gate=True, bidirectional=bidir)
rnn.layers[0].linear = WeightDropout(rnn.layers[0].linear, weight_p, layer_names='weight')
return rnn
def _one_hidden(self, l):
"Return one hidden state"
nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
return one_param(self).new_zeros(self.n_dir, self.bs, nh)
def _change_one_hidden(self, l, bs):
if self.bs < bs:
nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
return torch.cat([self.hidden[l], self.hidden[l].new_zeros(self.n_dir, bs-self.bs, nh)], dim=1)
if self.bs > bs: return self.hidden[l][:, :bs]
return self.hidden[l]
# Cell
awd_qrnn_lm_config = dict(emb_sz=400, n_hid=1552, n_layers=4, pad_token=1, bidir=False, output_p=0.1,
hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)
# Cell
awd_qrnn_clas_config = dict(emb_sz=400, n_hid=1552, n_layers=4, pad_token=1, bidir=False, output_p=0.4,
hidden_p=0.3, input_p=0.4, embed_p=0.05, weight_p=0.5)
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/text/models/awdlstm.py
|
awdlstm.py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/33_text.models.core.ipynb (unless otherwise specified).
__all__ = ['LinearDecoder', 'SequentialRNN', 'get_language_model', 'SentenceEncoder', 'masked_concat_pool',
'PoolingLinearClassifier', 'get_text_classifier']
# Cell
from ...data.all import *
from ..core import *
from .awdlstm import *
# Cell
_model_meta = {AWD_LSTM: {'hid_name':'emb_sz', 'url':URLs.WT103_FWD, 'url_bwd':URLs.WT103_BWD,
'config_lm':awd_lstm_lm_config, 'split_lm': awd_lstm_lm_split,
'config_clas':awd_lstm_clas_config, 'split_clas': awd_lstm_clas_split},
AWD_QRNN: {'hid_name':'emb_sz',
'config_lm':awd_qrnn_lm_config, 'split_lm': awd_lstm_lm_split,
'config_clas':awd_qrnn_clas_config, 'split_clas': awd_lstm_clas_split},}
# Transformer: {'hid_name':'d_model', 'url':URLs.OPENAI_TRANSFORMER,
# 'config_lm':tfmer_lm_config, 'split_lm': tfmer_lm_split,
# 'config_clas':tfmer_clas_config, 'split_clas': tfmer_clas_split},
# TransformerXL: {'hid_name':'d_model',
# 'config_lm':tfmerXL_lm_config, 'split_lm': tfmerXL_lm_split,
# 'config_clas':tfmerXL_clas_config, 'split_clas': tfmerXL_clas_split}}
# Cell
class LinearDecoder(Module):
"To go on top of a RNNCore module and create a Language Model."
initrange=0.1
def __init__(self, n_out, n_hid, output_p=0.1, tie_encoder=None, bias=True):
self.decoder = nn.Linear(n_hid, n_out, bias=bias)
self.decoder.weight.data.uniform_(-self.initrange, self.initrange)
self.output_dp = RNNDropout(output_p)
if bias: self.decoder.bias.data.zero_()
if tie_encoder: self.decoder.weight = tie_encoder.weight
def forward(self, input):
dp_inp = self.output_dp(input)
return self.decoder(dp_inp), input, dp_inp
# Cell
class SequentialRNN(nn.Sequential):
"A sequential module that passes the reset call to its children."
def reset(self):
for c in self.children(): getattr(c, 'reset', noop)()
# Cell
def get_language_model(arch, vocab_sz, config=None, drop_mult=1.):
"Create a language model from `arch` and its `config`."
meta = _model_meta[arch]
config = ifnone(config, meta['config_lm']).copy()
for k in config.keys():
if k.endswith('_p'): config[k] *= drop_mult
tie_weights,output_p,out_bias = map(config.pop, ['tie_weights', 'output_p', 'out_bias'])
init = config.pop('init') if 'init' in config else None
encoder = arch(vocab_sz, **config)
enc = encoder.encoder if tie_weights else None
decoder = LinearDecoder(vocab_sz, config[meta['hid_name']], output_p, tie_encoder=enc, bias=out_bias)
model = SequentialRNN(encoder, decoder)
return model if init is None else model.apply(init)
# Cell
def _pad_tensor(t, bs):
if t.size(0) < bs: return torch.cat([t, t.new_zeros(bs-t.size(0), *t.shape[1:])])
return t
# Cell
class SentenceEncoder(Module):
"Create an encoder over `module` that can process a full sentence."
def __init__(self, bptt, module, pad_idx=1, max_len=None): store_attr('bptt,module,pad_idx,max_len')
def reset(self): getattr(self.module, 'reset', noop)()
def forward(self, input):
bs,sl = input.size()
self.reset()
mask = input == self.pad_idx
outs,masks = [],[]
for i in range(0, sl, self.bptt):
#Note: this expects that sequence really begins on a round multiple of bptt
real_bs = (input[:,i] != self.pad_idx).long().sum()
o = self.module(input[:real_bs,i: min(i+self.bptt, sl)])
if self.max_len is None or sl-i <= self.max_len:
outs.append(o)
masks.append(mask[:,i: min(i+self.bptt, sl)])
outs = torch.cat([_pad_tensor(o, bs) for o in outs], dim=1)
mask = torch.cat(masks, dim=1)
return outs,mask
# Cell
def masked_concat_pool(output, mask, bptt):
"Pool `MultiBatchEncoder` outputs into one vector [last_hidden, max_pool, avg_pool]"
lens = output.shape[1] - mask.long().sum(dim=1)
last_lens = mask[:,-bptt:].long().sum(dim=1)
avg_pool = output.masked_fill(mask[:, :, None], 0).sum(dim=1)
avg_pool.div_(lens.type(avg_pool.dtype)[:,None])
max_pool = output.masked_fill(mask[:,:,None], -float('inf')).max(dim=1)[0]
x = torch.cat([output[torch.arange(0, output.size(0)),-last_lens-1], max_pool, avg_pool], 1) #Concat pooling.
return x
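# Note: the pooled vector concatenates [last_hidden, max_pool, avg_pool], so its width is
# 3 * the encoder hidden size -- which is why `get_text_classifier` below sizes the first
# layer of its head as `config[meta['hid_name']] * 3`.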
# Cell
class PoolingLinearClassifier(Module):
"Create a linear classifier with pooling"
def __init__(self, dims, ps, bptt, y_range=None):
if len(ps) != len(dims)-1: raise ValueError("Number of layers and dropout values do not match.")
acts = [nn.ReLU(inplace=True)] * (len(dims) - 2) + [None]
layers = [LinBnDrop(i, o, p=p, act=a) for i,o,p,a in zip(dims[:-1], dims[1:], ps, acts)]
if y_range is not None: layers.append(SigmoidRange(*y_range))
self.layers = nn.Sequential(*layers)
self.bptt = bptt
def forward(self, input):
out,mask = input
x = masked_concat_pool(out, mask, self.bptt)
x = self.layers(x)
return x, out, out
# Cell
def get_text_classifier(arch, vocab_sz, n_class, seq_len=72, config=None, drop_mult=1., lin_ftrs=None,
ps=None, pad_idx=1, max_len=72*20, y_range=None):
"Create a text classifier from `arch` and its `config`, maybe `pretrained`"
meta = _model_meta[arch]
config = ifnone(config, meta['config_clas']).copy()
for k in config.keys():
if k.endswith('_p'): config[k] *= drop_mult
if lin_ftrs is None: lin_ftrs = [50]
if ps is None: ps = [0.1]*len(lin_ftrs)
layers = [config[meta['hid_name']] * 3] + lin_ftrs + [n_class]
ps = [config.pop('output_p')] + ps
init = config.pop('init') if 'init' in config else None
encoder = SentenceEncoder(seq_len, arch(vocab_sz, **config), pad_idx=pad_idx, max_len=max_len)
model = SequentialRNN(encoder, PoolingLinearClassifier(layers, ps, bptt=seq_len, y_range=y_range))
return model if init is None else model.apply(init)
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/text/models/core.py
|
core.py
|
from .core import *
from .awdlstm import *
|
zwyfastai
|
/zwyfastai-2.0.21.tar.gz/zwyfastai-2.0.21/src/fastai/text/models/__init__.py
|
__init__.py
|
from setuptools import setup,find_packages
setup(
name='zwzd',
version=1.0,
author='小松果',
author_email='[email protected]',
maintainer='小松果',
maintainer_email='[email protected]',
url='https://www.bilibili.com/bangumi/play/ep94081',
download_url='https://www.bilibili.com/video/BV1x8411n7vv/')
|
zwzd
|
/zwzd-1.0.tar.gz/zwzd-1.0/setup.py
|
setup.py
|
### Description of the project
A Python package to enable quick iterative time series analysis, modelling and forecasting using combinations of
automatic transformations, differencing and forecasting models.
### Installation
```
pip install zx_forecasting_framework
```
### Example
```
from zx_forecasting_framework import Analyse
from zx_forecasting_framework import Metrics
from zx_forecasting_framework import Model
from zx_forecasting_framework import TimeSeriesObject
from zx_forecasting_framework import Transform
```
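A minimal end-to-end sketch (illustrative only; assumes a dataframe with a `date` column and a numeric response column named `sales`, and uses the `TimeSeriesObject` and `Metrics` APIs shipped in the package):
```
import pandas as pd
from zx_forecasting_framework import TimeSeriesObject, Metrics

df = pd.read_csv('sales.csv')  # hypothetical input file
tso = (TimeSeriesObject(df)
       .set_date_column('date')
       .xy_split('sales')
       .train_test_split('2018-01-01', '2019-12-31',   # train window
                         '2020-01-01', '2020-06-30',   # test window
                         '2020-07-01', '2020-12-31'))  # prediction window
naive = tso.y_test.shift(1).bfill()      # naive one-step-behind baseline forecast
print(Metrics(tso.y_test, naive).mape)   # MAPE of the baseline on the test window
```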
### License
ZX Forecasting Framework is licensed under <a href = 'https://opensource.org/licenses/MIT'>the MIT License</a>
|
zx-forecasting-framework
|
/zx_forecasting_framework-0.0.1.tar.gz/zx_forecasting_framework-0.0.1/README.md
|
README.md
|
from setuptools import setup
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='zx_forecasting_framework',
version='0.0.1',
description='Forecasting framework for ZX projects',
packages=['zx_forecasting_framework'],
author='Tamojit Maiti, Shashi Bhushan Singh',
author_email='[email protected]',
long_description=long_description,
long_description_content_type='text/markdown'
,zip_safe=False)
|
zx-forecasting-framework
|
/zx_forecasting_framework-0.0.1.tar.gz/zx_forecasting_framework-0.0.1/setup.py
|
setup.py
|
import pandas as pd
import numpy as np
#################################### Class Definitions ########################
class TimeSeriesObject:
"""
A class that defines the time-series object
Attributes:
df : pd.DataFrame
The dataframe containing the time-series data
response_variable : str
Column name containing the response variable
        date_column : str
Column name containing the date column
drop_col : str
            Column name which needs to be dropped, must be part of the original dataframe
add_col : str
Column name which needs to be added, must be part of original dataframe
start_train_date : pd.datetime
Date from which training is to be started
end_train_date : pd.datetime
Date till which training is to be done
start_test_date : pd.datetime
Date from which testing is to be started
end_test_date : pd.datetime
            Date till which testing is to be done
start_pred_date : pd.datetime
Date from which prediction is to be started
end_pred_date : pd.datetime
Date till which prediction is to be done
"""
def __init__(self, df):
"""
Parameters
----------
df : pd.DataFrame
The dataframe containing the time series data
"""
# Instantiating variables
self.df = df
        # Auxiliary variables
# target
self.response_variable = None
# modified df
self.dfm = df
# date column
self.date_column = None
# X-y splits
self.X = None
self.y = None
# Stationary X-y
self.Xs = self.X
self.ys = self.y
# feature selection
self.Xsf = None
self._selected_features = []
# train-test-pred splits
self.X_train = None
self.X_test = None
self.X_pred = None
self.y_train = None
self.y_test = None
self.y_pred = None
def set_date_column(self, date_column):
"""
Parameters
----------
date_column : str
Column name containing the date column
Returns
----------
self : timeSeriesObject
timeSeriesObject
"""
self.df = self.df.set_index(date_column)
self.df.index = pd.to_datetime(self.df.index, format = '%d-%m-%Y')
self.dfm = self.df
return self
def drop_col(self, drop_col):
"""
Parameters
----------
drop_col : str
            Column name that is to be dropped
Returns
----------
self : timeSeriesObject
timeSeriesObject
"""
self.dfm = self.dfm.drop(drop_col, axis = 1)
return self
def add_col(self, add_col):
"""
Parameters
----------
add_col : str
            Column name that is to be added
Returns
----------
self : timeSeriesObject
timeSeriesObject
"""
col_list = self.dfm.columns.tolist() + [add_col]
self.dfm = self.df[col_list]
return self
def xy_split(self, response_variable):
"""
Parameters
----------
response_variable : str
Columns name that is the response variable
Returns
----------
self : timeSeriesObject
timeSeriesObject
"""
self.response_variable = response_variable
self.y = self.dfm[response_variable]
self.X = self.dfm.drop(response_variable, axis = 1)
self.Xm = self.X
self.Xs = self.X
self.ys = self.y
return self
def train_test_split(self,
start_train_date,
end_train_date,
start_test_date,
end_test_date,
start_pred_date,
end_pred_date):
"""
Parameters
----------
start_train_date : pd.datetime
Date from which training is to be started
end_train_date : pd.datetime
Date till which training is to be done
start_test_date : pd.datetime
Date from which testing is to be started
end_test_date : pd.datetime
Date till which testing is to be done
start_pred_date : pd.datetime
Date from which prediction is to be started
end_pred_date : pd.datetime
Date till which prediction is to be done
Returns
----------
self : timeSeriesObject
timeSeriesObject
"""
self.X_train = self.Xs.loc[start_train_date:end_train_date]
self.y_train = self.ys.loc[start_train_date:end_train_date]
self.X_test = self.Xs.loc[start_test_date:end_test_date]
self.y_test = self.ys.loc[start_test_date:end_test_date]
self.X_pred = self.Xs.loc[start_pred_date:end_pred_date]
self.y_pred = [np.nan] * len(self.X_pred)  # placeholder for the unknown forecast-period actuals
return self
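# Example (sketch): building a TimeSeriesObject from a synthetic weekly dataframe.
# The column names ('date', 'sales', 'price') and all dates below are hypothetical,
# chosen only to illustrate the expected call order.
if __name__ == '__main__':
    np.random.seed(0)
    rng = pd.date_range('2018-01-07', periods=120, freq='W')
    demo = pd.DataFrame({'date': rng.strftime('%d-%m-%Y'),
                         'sales': np.random.rand(120) * 100 + 50,
                         'price': np.random.rand(120) + 1.0})
    tso = TimeSeriesObject(demo).set_date_column('date').xy_split('sales')
    tso.train_test_split('2018-01-07', '2019-12-29',   # training window
                         '2020-01-05', '2020-02-23',   # test window
                         '2020-03-01', '2020-04-12')   # forecast window
    print(tso.X_train.shape, tso.X_test.shape, tso.X_pred.shape)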
|
zx-forecasting-framework
|
/zx_forecasting_framework-0.0.1.tar.gz/zx_forecasting_framework-0.0.1/zx_forecasting_framework/timeSeriesObject.py
|
timeSeriesObject.py
|
import numpy as np
import warnings
####################################### Data ################################
class Metrics:
"""
A class used to represent the metrics relevant for time-series
Attributes
----------
y_true : pd.Series
The actuals
y_predicted : pd.Series
The forecasts
Methods
----------
coef_variation
Coefficient of Variation
avg_demand_interval
Average Demand Interval
mean_absolute_error
MAE
mean_absolute_percentage_error
MAPE
sym_mean_absolute_percentage_error
SMAPE
"""
def __init__(self, y_true, y_predicted = None):
"""
Parameters
----------
y_true : pd.Series
The actuals
y_predicted : pd.Series
The forecasts
"""
# Instantiation Attributes
self.y_true = np.ravel(np.array(y_true))
self.y_predicted = np.ravel(np.array(y_predicted)) if y_predicted is not None else None
# Metrics
self.cov = np.round(self.coef_variation(), 2)
self.adi = np.round(self.avg_demand_interval(), 2)
if (y_predicted is not None):
self.mae = np.round(self.mean_absolute_error(), 2)
self.mape = np.round(self.mean_absolute_percentage_error(), 2)
self.smape = np.round(self.sym_mean_absolute_percentage_error(), 2)
else:
warnings.warn("No 'y_predicted' passed")
# Univariate Metrics
def coef_variation(self):
"""
Coefficient of variation of the response time series
Parameters
----------
Returns
-------
COV of the actual time series
"""
return np.std(self.y_true)/np.mean(self.y_true)
def avg_demand_interval(self):
"""
Average Demand Interval for the response time series
Parameters
----------
Returns
-------
ADI of the actual time series
"""
return len(self.y_true)/sum(self.y_true > 0)
# Bivariate Metrics
def mean_absolute_error(self):
"""
Mean Absolute Error for the predicted time series
with respect to the actual time series
Parameters
----------
Returns
-------
MAE of the forecasted time series with respect to the actuals
"""
if self.y_predicted is None or len(self.y_predicted) == 0:
return None
else:
return np.mean(np.abs(self.y_true - self.y_predicted))
def mean_absolute_percentage_error(self):
"""
Mean Absolute Percentage Error for the predicted time series
with respect to the actual time series
Parameters
----------
Returns
-------
MAPE of the forecasted time series with respect to the actuals
"""
if self.y_predicted is None or len(self.y_predicted) == 0:
return None
else:
return 100 * np.mean(np.abs(self.y_true - self.y_predicted)/ self.y_true)
def sym_mean_absolute_percentage_error(self):
"""
Symmetric Mean Absolute Percentage Error for the
predicted time series with respect to the actual time series
Parameters
----------
Returns
-------
SMAPE of the forecasted time series with respect to the actuals
"""
if self.y_predicted is None or len(self.y_predicted) == 0:
return None
else:
return 200 * np.mean(np.abs(self.y_true - self.y_predicted)/ (self.y_true + self.y_predicted))
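# Example (sketch): the metrics on a small hypothetical actuals/forecast pair.
# COV = std/mean of the actuals, ADI = periods / non-zero periods,
# MAE = mean |error|, MAPE = 100 * mean(|error| / actual),
# SMAPE = 200 * mean(|error| / (actual + forecast)).
if __name__ == '__main__':
    actuals = np.array([10., 12., 9., 8., 11.])
    forecast = np.array([11., 10., 10., 9., 10.])
    m = Metrics(y_true=actuals, y_predicted=forecast)
    print(m.cov, m.adi, m.mae, m.mape, m.smape)
    # An intermittent series (zeros in half the periods) pushes the ADI above 1,
    # which is what Analyse.intermittency() visualizes.
    m2 = Metrics(y_true=np.array([5., 0., 0., 7., 0., 6.]))
    print(m2.cov, m2.adi)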
|
zx-forecasting-framework
|
/zx_forecasting_framework-0.0.1.tar.gz/zx_forecasting_framework-0.0.1/zx_forecasting_framework/metrics.py
|
metrics.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.tsa.api as smt
import seaborn as sns
from .timeSeriesObject import TimeSeriesObject
#################################### Class Definitions ########################
class Analyse:
"""
A class used to represent the Analysis functionalities for time-series
Attributes:
tso: timeSeriesObject
A timeSeriesObject (to be analysed)
Methods:
visual_summary
Provides a visual summary of the passed time series, with its time-series plot
and the ACF and PACF plots
_split(a,n)
Splits the iterable a into n parts, whilst preserving the ordering
intermittency(n_chunks)
Plots the COV vs the ADI for each segment of the time series, to visually examine
the demand characteristics and classify them into one of the four buckets
"""
def __init__(self, tso):
"""
Parameters:
tso: timeSeriesObject
"""
# Initializing Attributes
self.tso = tso
# Auxiliary Attributes
# X-y (to be replaced by self.tso.X, self.tso.y)
self.X = tso.X
self.y = tso.y
# Intermittency Measure
self.adi = None
self.cov = None
def visual_summary(self, series):
"""
Prints the time-series plot, the ACF and the PACF of the passed series
Parameters:
series : pd.Series
Returns:
ts_ax, acf_ax, pacf_ax : matplotlib.axes.Axes
"""
plt.figure(figsize = (10,10))
layout = (2,2)
ts_ax = plt.subplot2grid(layout, (0,0), colspan = 2)
acf_ax = plt.subplot2grid(layout, (1,0))
pacf_ax = plt.subplot2grid(layout, (1,1))
series.plot(ax = ts_ax, )
smt.graphics.plot_acf(series, lags=int(len(series)/3), ax=acf_ax)
smt.graphics.plot_pacf(series, lags=int(len(series)/3), ax=pacf_ax)
[ax.set_xlim(1.5) for ax in [acf_ax, pacf_ax]]
sns.despine()
plt.tight_layout()
return ts_ax, acf_ax, pacf_ax
def _split(self, a, n):
"""
Splits a into n nearly equal parts, whilst preserving order
Parameters:
a : np.array
Iterable which needs to be split
n : int
Number of nearly equal parts into which the array needs to be split
Returns:
generator :
Generator yielding the n nearly equal slices of a, in order
"""
k, m = divmod(len(a), n)
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
def intermittency(self, n_chunks = 6):
"""
Plots COV vs ADI for demand classification
Parameters:
n_chunks : int
Number of parts the time series needs to be split into
Returns:
plot : matplotlib.pyplot.figure
"""
self.cov = np.std(self.y)/np.mean(self.y)
self.adi = len(self.y)/sum(self.y > 0)
plt.figure(figsize = (6,6))
i = 0
for y_chunk in list(self._split(self.y.values, n_chunks)):
plt.plot((np.std(y_chunk)/np.mean(y_chunk))**2,
len(y_chunk)/sum(y_chunk > 0),
marker = 'o',
alpha = ((i+1)/n_chunks),
color = 'blue')
i+=1
plt.title('COV vs ADI plot for Demand Classification')
plt.xlabel('COV^2')
plt.ylabel('ADI')
plt.xlim([0,1])
plt.ylim([0,5])
plt.plot([0.49, 0.49], [0,5], color = 'green')
plt.plot([0.0, 1.0], [1.32, 1.32], color = 'green')
plt.annotate('Smooth', xy = (0.2,0.5))
plt.annotate('Erratic', xy = (0.8, 0.5))
plt.annotate('Intermittent', xy = (0.2,3))
plt.annotate('Lumpy', xy = (0.8, 3))
return plt
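# Example (sketch): classifying demand for a synthetic intermittent series.
# Column names and dates are hypothetical. The cut-offs drawn by intermittency()
# (COV^2 = 0.49, ADI = 1.32) follow the usual Syntetos-Boylan
# smooth / erratic / intermittent / lumpy quadrants.
if __name__ == '__main__':
    np.random.seed(0)
    idx = pd.date_range('2019-01-06', periods=104, freq='W')
    demo = pd.DataFrame({'demand': np.random.poisson(1.2, size=104).astype(float),
                         'promo': np.random.rand(104)}, index=idx)
    tso = TimeSeriesObject(demo).xy_split('demand')
    ana = Analyse(tso)
    ana.intermittency(n_chunks=4)
    print('COV: {:.2f}  ADI: {:.2f}'.format(ana.cov, ana.adi))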
|
zx-forecasting-framework
|
/zx_forecasting_framework-0.0.1.tar.gz/zx_forecasting_framework-0.0.1/zx_forecasting_framework/analyse.py
|
analyse.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.tsa.api as smt
import seaborn as sns
# import timeSeriesObject, analyse
from .timeSeriesObject import TimeSeriesObject
from .analyse import Analyse
from scipy.stats import boxcox
from math import exp, log
#################################### Class Definitions ########################
class Transform:
"""
A class used to represent the transform object
Parameters
----------
tso : timeSeriesObject
timeSeriesObject
Methods
----------
do_boxcox
Performs the box-cox transformation on the response variable
inverse_boxcox
Inverses the box-cox transformation on the response variable
stationarize_y
Stationarizes the response variable
stationarize_X
Stationarizes the exogenous variable matrix
"""
def __init__(self, tso):
"""
Parameters
----------
tso : timeSeriesObject
timeSeriesObject
"""
# Initializing Attributes
self.tso = tso
# Auxiliary Attributes
# X-y
self.X = tso.X
self.y = tso.y
# y boxcox transformed
self.yb = tso.y
self.y_lambda = None
# Stationary X-y
self.Xs = tso.X
self.ys = tso.y
self.n_lag = None
self.lags = {}
self.y_stationary_history = self.tso.y
self.y_diff_order = None
def do_boxcox(self, lmbda = None):
"""
Performs the boxcox transformation on the response variable
Parameters
----------
lmbda : float
Lambda parameter for the boxcox transformation
Returns
---------
list : list
Box-cox transformed series
"""
if lmbda is None:
yb, self.y_lambda = boxcox(x = self.tso.y.dropna())
else:
yb = boxcox(x = self.tso.y.dropna(),
lmbda = lmbda)
self.y_lambda = lmbda
self.yb = pd.Series(data = list(yb) + [np.nan]*(len(self.tso.y) - len(yb)),
index = self.tso.y.index)
return self.yb
def inverse_boxcox(self, series):
"""
Performs the inverse boxcox transformation on the response variable
Parameters
----------
series : pd.Series
The transformed series that needs to be inverse transformed
Returns
---------
series : pd.Series
Inverse Box-Cox transformed series
"""
if self.y_lambda == 0:
return pd.Series(data = exp(series), index = series.index)
else:
return pd.Series(
data = np.exp(np.log(self.y_lambda * np.abs(series.values) + 1)/self.y_lambda),
index = series.index)
def unstationarize_v2(self, series, initial_vals):
"""Undoes successive differencing by anchoring each cumulative sum on the corresponding initial value"""
z = series.values
for initial_val in initial_vals:
z = initial_val + np.cumsum(z)
return z
def stationarize_y(self,):
"""
Stationarizing the response variable
Parameters
----------
Returns
----------
list : list
Stationary response variable
"""
series = self.yb
y = pd.DataFrame(data = series.values,
index = series.index,
columns = ['diff_0'])
z = y.diff_0
diff_order = 0
while smt.adfuller(z.dropna())[1]>0.05:
diff_order = diff_order + 1
y['diff_{}'.format(diff_order)] = y['diff_{}'.format(diff_order - 1)].diff(1)
z = y['diff_{}'.format(diff_order)]
self.y_stationary_history = y
self.y_diff_order = diff_order
self.ys = y['diff_{}'.format(diff_order)]
self.tso.ys = self.ys
return self.ys
def stationarize_X(self,):
"""
Performs the stationarizing operation on the data matrix
Parameters
----------
Returns
----------
dataframe : pd.DataFrame
the stationarized data matrix
"""
X = self.tso.X
for col in X.columns.tolist():
diff_order = 0
while smt.adfuller(X[col].dropna())[1]>0.05:
diff_order = diff_order + 1
X[col] = X[col].diff(1)
X = X.rename(columns = {col:col + '_diff_{}'.format(diff_order)})
self.Xs = X
self.tso.Xs = self.Xs
return self.Xs
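# Example (sketch): Box-Cox transforming and stationarizing a synthetic trending
# series. Column names and dates are hypothetical; lambda is estimated by
# scipy.stats.boxcox and the differencing order is chosen by repeating the ADF
# test until p < 0.05.
if __name__ == '__main__':
    np.random.seed(0)
    idx = pd.date_range('2018-01-07', periods=104, freq='W')
    demo = pd.DataFrame({'sales': np.linspace(50, 150, 104) + 10 * np.random.rand(104),
                         'price': np.random.rand(104) + 1.0}, index=idx)
    tso = TimeSeriesObject(demo).xy_split('sales')
    tst = Transform(tso)
    tst.do_boxcox()
    tst.stationarize_y()
    tst.stationarize_X()
    print('lambda: {:.3f}  differencing order: {}'.format(tst.y_lambda, tst.y_diff_order))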
|
zx-forecasting-framework
|
/zx_forecasting_framework-0.0.1.tar.gz/zx_forecasting_framework-0.0.1/zx_forecasting_framework/transform.py
|
transform.py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from statsmodels.tsa.api import SimpleExpSmoothing, Holt, ExponentialSmoothing
from statsmodels.tsa.statespace.sarimax import SARIMAX
# import timeSeriesObject, analyse, transform, metrics
from .timeSeriesObject import TimeSeriesObject
from .analyse import Analyse
from .transform import Transform
from .metrics import Metrics
# import metrics
############################### Class Definition ############################
class Model:
"""
A class used to represent time-series forecasting models
Parameters
--------
tso : timeSeriesObject
timeSeriesObject
tst : transform
time series transformation object
Methods
--------
naive
Naive forecasting y_t <-- y_{t-1}
seasonal_naive
Seasonal naive forecasting y_t <-- y_{t-n}
moving_average
Moving average forecasting with growth factor
simple_exp_smoothing
Simple Exponential Smoothing forecasting
holt
Holt Exponential Smoothing forecasting
holt_winters
Holt-Winters Exponential Smoothing forecasting
linear_regression
Linear Regression forecasting
elastic_net
Elastic Net Regression forecasting
random_forest
Random Forest Regression forecasting
xgboost
XGBoost Regression forecasting
light_gbm
Light GBM Regression forecasting
sarimax
SARIMAX forecasting
"""
def __init__(self, tso, tst):
"""
Parameters
--------
tso : timeSeriesObject
timeSeriesObject
tst : transform
time series transformation object
"""
self.tso = tso # time-series object
self.tst = tst # transformation object
# Auxiliary variables
# Each Model
self.mape_list = []
self.smape_list = []
self.predictions_list = []
self.model_list = []
# Summary Table
self.summary_table = []
# Best Model
self.best_mape = None
self.best_smape = None
self.best_predictions = None
self.best_model = None
def naive(self,):
"""
Naive forecasting model. Every future period is forecast as the last observed value
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
# Fitting
y_test_pred_sb = pd.Series(data = [self.tso.y_train[-1]]*len(self.tso.y_test),
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting to Train + Test
y_pred_sb = pd.Series(data = [self.tso.y_test[-1]]*len(self.tso.X_pred),
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for Naive')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('Naive')
return y_test_pred, y_test, y_pred
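# Note on the destationarizing block shared by the models in this class: when the
# target was differenced d times, the stationary forecasts are increments that get
# anchored on the last known level of the (d-1)-times-differenced series. For
# example, with a last training level of 100 and stationary forecasts [2, -1, 3],
# the reconstructed levels are 100 + np.cumsum([2, -1, 3]) = [102, 101, 104];
# the while loop repeats this once per differencing order before applying the
# inverse Box-Cox transform.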
def seasonal_naive(self,seasonal_period = 52):
"""
Seasonal Naive forecasting model. Each period is forecast as the actual from the same period one season earlier
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
# Fitting
y_test_pred_sb = pd.Series(data = self.tso.y_train.values[-seasonal_period:-seasonal_period + len(self.tso.X_test)],
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting to Train + Test
y_pred_sb = pd.Series(data = pd.concat([self.tso.y_train, self.tso.y_test]).values[-seasonal_period:-seasonal_period + len(self.tso.X_pred)],
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for Seasonal Naive')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('Seasonal Naive')
return y_test_pred, y_test, y_pred
def elastic_net(self, alpha = 0.5):
"""
Linear Regression with L1 and L2 regularization
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
# Model Initialization
elastic_net_model = ElasticNet(alpha = alpha)
# Fitting to Train
elastic_net_model.fit(self.tso.X_train[3:], self.tso.y_train[3:])
y_test_pred_sb = elastic_net_model.predict(self.tso.X_test)
y_test_pred_sb = pd.Series(data = y_test_pred_sb,
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting to Train and Test
elastic_net_model.fit(pd.concat([self.tso.X_train[3:], self.tso.X_test]),
pd.concat([self.tso.y_train[3:], self.tso.y_test]))
y_pred_sb = elastic_net_model.predict(self.tso.X_pred)
y_pred_sb = pd.Series(data = y_pred_sb,
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for Elastic Net')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('Elastic-Net')
return y_test_pred, y_test, y_pred
def linear_regression(self, ):
"""
Linear Regression with Ordinary Least Squares
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
# Model Initialization
linreg_model = LinearRegression()
# Fitting to Train
linreg_model.fit(self.tso.X_train[3:], self.tso.y_train[3:])
y_test_pred_sb = linreg_model.predict(self.tso.X_test)
y_test_pred_sb = pd.Series(data = y_test_pred_sb,
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# print(y_test_pred, y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting to Train and Test
linreg_model.fit(pd.concat([self.tso.X_train[3:], self.tso.X_test]),
pd.concat([self.tso.y_train[3:], self.tso.y_test]))
y_pred_sb = linreg_model.predict(self.tso.X_pred)
y_pred_sb = pd.Series(data = y_pred_sb,
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for Linear Regression')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('Linear Regression')
return y_test_pred, y_test, y_pred
def random_forest(self,):
"""
Scikit-learn implementation of random-forest
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
# Model Initialization
rf_model = RandomForestRegressor()
# Fitting to Train
rf_model.fit(self.tso.X_train[3:], self.tso.y_train[3:])
y_test_pred_sb = rf_model.predict(self.tso.X_test)
y_test_pred_sb = pd.Series(data = y_test_pred_sb,
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting to Train and Test
rf_model.fit(pd.concat([self.tso.X_train[3:], self.tso.X_test]),
pd.concat([self.tso.y_train[3:], self.tso.y_test]))
y_pred_sb = rf_model.predict(self.tso.X_pred)
y_pred_sb = pd.Series(data = y_pred_sb,
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for Random Forest')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('Random Forest')
return y_test_pred, y_test, y_pred
def xgboost(self,):
"""
eXtreme Gradient Boosting on Decision Trees
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
# Model Initialization
xgb_model = XGBRegressor()
# Fitting to Train
xgb_model.fit(self.tso.X_train[3:], self.tso.y_train[3:])
y_test_pred_sb = xgb_model.predict(self.tso.X_test)
y_test_pred_sb = pd.Series(data = y_test_pred_sb,
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting to Train and Test
xgb_model.fit(pd.concat([self.tso.X_train[3:], self.tso.X_test]),
pd.concat([self.tso.y_train[3:], self.tso.y_test]))
y_pred_sb = xgb_model.predict(self.tso.X_pred)
y_pred_sb = pd.Series(data = y_pred_sb,
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for XGBoost')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('XGBoost')
return y_test_pred, y_test, y_pred
def light_gbm(self,):
"""
LightGBM on Decision Trees
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
# Model Initialization
lgbm_model = LGBMRegressor()
# Fitting to Train
lgbm_model.fit(self.tso.X_train[3:], self.tso.y_train[3:])
y_test_pred_sb = lgbm_model.predict(self.tso.X_test)
y_test_pred_sb = pd.Series(data = y_test_pred_sb,
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting to Train and Test
lgbm_model.fit(pd.concat([self.tso.X_train[3:], self.tso.X_test]),
pd.concat([self.tso.y_train[3:], self.tso.y_test]))
y_pred_sb = lgbm_model.predict(self.tso.X_pred)
y_pred_sb = pd.Series(data = y_pred_sb,
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for Light GBM')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('Light GBM')
return y_test_pred, y_test, y_pred
def moving_average(self, seasonal_period = 26,
window_size = 2,
growth_factor = False):
"""
Moving Average with growth factor.
The prediction tomorrow is a moving average of the actuals in the near history
and the actuals of the previous season
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
y_test_pred_sb = np.zeros(shape=len(self.tso.y_test))
if growth_factor:
for i in range(len(y_test_pred_sb)):
y_test_pred_sb[i] = np.mean(
self.tso.y_train.values[-seasonal_period + i - window_size - 1: -seasonal_period + i - 1])
else:
num = np.zeros(shape = len(self.tso.y_test))
den = np.zeros(shape = len(self.tso.y_test))
for i in range(len(y_test_pred_sb)):
num[i] = np.mean(self.tso.y_train.values[-seasonal_period + i - window_size - 1: -seasonal_period + i - 1])
den[i] = np.mean(self.tso.y_train.values[-2*seasonal_period + i - window_size - 1: -2*seasonal_period + i - 1])
y_test_pred_sb[i] = (num[i]/den[i]) * self.tso.y_train.values[-seasonal_period + i -window_size]
y_test_pred_sb = pd.Series(data = y_test_pred_sb,
index=self.tso.y_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting to Train + Test
y_pred_sb = np.zeros(shape=len(self.tso.X_pred))
if growth_factor:
for i in range(len(y_pred_sb)):
y_pred_sb[i] = np.mean(
pd.concat([self.tso.y_train, self.tso.y_test]).values[-seasonal_period + i - window_size - 1: -seasonal_period + i - 1])
else:
num = np.zeros(shape = len(self.tso.X_pred))
den = np.zeros(shape = len(self.tso.X_pred))
for i in range(len(y_pred_sb)):
num[i] = np.mean(pd.concat([self.tso.y_train, self.tso.y_test]).values[-seasonal_period + i - window_size - 1: -seasonal_period + i - 1])
den[i] = np.mean(pd.concat([self.tso.y_train, self.tso.y_test]).values[-2*seasonal_period + i - window_size - 1: -2*seasonal_period + i - 1])
y_pred_sb[i] = (num[i]/den[i]) * pd.concat([self.tso.y_train, self.tso.y_test]).values[-seasonal_period + i -window_size]
y_pred_sb = pd.Series(data = y_pred_sb,
index=self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for Moving Average')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('Moving Average')
return y_test_pred, y_test, y_pred
def simple_exp_smoothing(self, smoothing_level = 0.3, optimized = False):
"""
Simple Exponential Smoothing
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
ses_model = SimpleExpSmoothing(self.tso.y_train[self.tst.y_diff_order:]).fit(smoothing_level = smoothing_level,
optimized = optimized)
y_test_pred_sb = ses_model.forecast(len(self.tso.y_test))
y_test_pred_sb = pd.Series(data = y_test_pred_sb,
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting Train + Test
ses_model = SimpleExpSmoothing(pd.concat([self.tso.y_train[self.tst.y_diff_order:], self.tso.y_test])).fit(smoothing_level=smoothing_level,
optimized=optimized)
y_pred_sb = ses_model.forecast(len(self.tso.X_pred))
y_pred_sb = pd.Series(data = y_pred_sb,
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for Simple Exponential Smoothing')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('Simple Exponential Smoothing')
return y_test_pred, y_test, y_pred
def holt(self, smoothing_level = 0.3, smoothing_slope = 0.2, optimized = False):
"""
Holt's Double Exponential Smoothing Model
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
holt_model = Holt(self.tso.y_train[self.tst.y_diff_order:],
exponential = False,
damped = False).fit(smoothing_level = smoothing_level,
smoothing_slope = smoothing_slope,
optimized = optimized)
y_test_pred_sb = holt_model.forecast(len(self.tso.y_test))
y_test_pred_sb = pd.Series(data = y_test_pred_sb,
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting Train + Test
holt_model = Holt(pd.concat([self.tso.y_train[self.tst.y_diff_order:], self.tso.y_test]),
exponential = False,
damped = False).fit(smoothing_level = smoothing_level,
smoothing_slope = smoothing_slope,
optimized = optimized)
y_pred_sb = holt_model.forecast(len(self.tso.X_pred))
y_pred_sb = pd.Series(data = y_pred_sb,
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for Holt Exponential Smoothing')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('Holt Exponential Smoothing')
return y_test_pred, y_test, y_pred
def holt_winters(self, seasonal = 'add',
seasonal_periods = 52,
smoothing_level = 0.3,
smoothing_slope = 0.1,
smoothing_seasonal = 0.2):
"""
Holt-Winter Triple Exponential Smoothing
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
holt_model = ExponentialSmoothing(self.tso.y_train[self.tst.y_diff_order:],
seasonal = seasonal,
seasonal_periods = seasonal_periods).fit(smoothing_level = smoothing_level,
smoothing_slope = smoothing_slope,
smoothing_seasonal = smoothing_seasonal)
y_test_pred_sb = holt_model.forecast(len(self.tso.y_test))
y_test_pred_sb = pd.Series(data = y_test_pred_sb,
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting Train + Test
holt_model = ExponentialSmoothing(pd.concat([self.tso.y_train[self.tst.y_diff_order:], self.tso.y_test]),
seasonal = seasonal,
seasonal_periods = seasonal_periods).fit(smoothing_level = smoothing_level,
smoothing_slope = smoothing_slope,
smoothing_seasonal = smoothing_seasonal)
y_pred_sb = holt_model.forecast(len(self.tso.X_pred))
y_pred_sb = pd.Series(data = y_pred_sb,
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for Holt-Winters Exponential Smoothing')
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('Holt-Winters Exponential Smoothing')
return y_test_pred, y_test, y_pred
def sarimax(self, seasonalities = [26,52]):
"""
Seasonal Autoregressive Integrated Moving Average with eXogenous variables
Automatically selects best parameters based on AIC values
Returns
--------
y_test_pred : pd.Series
Predictions on the test dataset
y_test : pd.Series
Actuals of the test dataset
y_pred : pd.Series
Predictions for the forecast period
"""
# Fitting Train
# return self.tso.X_train
aic_dict = {}
for p in range(3):
for q in range(3):
for m in seasonalities:
model = SARIMAX(endog = self.tso.y_train[2:],
exog = self.tso.X_train[2:],
order = (p,0,q),
seasonal_order=(0,0,0,m)).fit(disp = 0)
aic_dict[str(p)+ '-' + str(q) + '-' + str(m)] = model.aic
aic_table = pd.DataFrame(aic_dict.values(), index = aic_dict.keys()).rename(columns={0: 'aic'})
optimal_vals = aic_table[aic_table.aic == aic_table.aic.min()].index[-1].split("-")
p,q,m = int(optimal_vals[0]), int(optimal_vals[1]), int(optimal_vals[2])
model = SARIMAX(endog = self.tso.y_train[2:],
exog = self.tso.X_train[2:],
order = (p,0,q),
seasonal_order=(0,0,0,m)).fit()
y_test_pred_sb = model.forecast(len(self.tso.y_test),
exog = self.tso.X_test)
y_test_pred_sb = pd.Series(data = y_test_pred_sb,
index = self.tso.X_test.index)
y_test_sb = self.tso.y_test
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order > 0:
y_test_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] +\
np.cumsum(y_test_pred_sb)
y_test_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) - 1] + \
np.cumsum(y_test_sb)
diff_order = diff_order - 1
y_test_pred_b = y_test_pred_sb
y_test_b = y_test_sb
# Inverse-Boxcox
# Testing Predictions
y_test_pred = self.tst.inverse_boxcox(y_test_pred_b)
# Testing Actuals
y_test = self.tst.inverse_boxcox(y_test_b)
# Fitting Train + Test
model = SARIMAX(endog = pd.concat([self.tso.y_train[2:], self.tso.y_test]), exog = pd.concat([self.tso.X_train[2:],self.tso.X_test]),
order = (p,0,q),
seasonal_order=(0,0,0,m)).fit()
y_pred_sb = model.forecast(len(self.tso.X_pred),
exog = self.tso.X_pred)
y_pred_sb = pd.Series(data = y_pred_sb,
index = self.tso.X_pred.index)
# Destationarizing
diff_order = self.tst.y_diff_order
y_stationary_history = self.tst.y_stationary_history
while diff_order>0:
y_pred_sb = y_stationary_history['diff_{}'.format(diff_order - 1)].iloc[len(self.tso.y_train) + len(self.tso.y_test) - 1] + \
np.cumsum(y_pred_sb)
diff_order = diff_order - 1
y_pred_b = y_pred_sb
# Inverse-Boxcox
# Predictions
y_pred = self.tst.inverse_boxcox(y_pred_b)
# Visualize
plt.figure(figsize = (14,6))
plt.plot(self.tst.y, label = 'Historical')
plt.plot(y_test, label = 'Actuals')
plt.plot(y_test_pred, label = 'Forecast')
plt.plot(y_pred, label = 'Predictions')
plt.title('Forecast vs Actuals and Predictions for SARIMAX({},{},{},{})'.format(p,0,q,m))
plt.legend()
plt.show()
# Evaluate
smape = Metrics(y_true = y_test, y_predicted = y_test_pred).smape
mape = Metrics(y_true = y_test, y_predicted = y_test_pred).mape
# Update list
self.smape_list.append(smape)
self.mape_list.append(mape)
self.predictions_list.append(y_pred.values.tolist())
self.model_list.append('SARIMAX')
return y_test_pred, y_test, y_pred
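# Note: with seasonal_order=(0, 0, 0, m) the seasonal AR, differencing and MA terms
# are all zero, so the grid above effectively ranks plain ARMA(p, q) orders
# (p, q in 0..2) by AIC; the periods in `seasonalities` only label the candidates
# rather than add seasonal structure.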
def construct_summary_table(self,):
"""
Constructs and returns summary table
Returns
-------
summary_table : pd.DataFrame
Summary Table containing Model Name, MAPE, SMAPE, Predictions
"""
self.naive()
self.seasonal_naive()
self.moving_average()
self.linear_regression()
self.elastic_net()
self.random_forest()
self.xgboost()
self.light_gbm()
self.simple_exp_smoothing()
self.holt()
self.holt_winters()
self.sarimax()
self.summary_table = pd.DataFrame(data = {'model_name':self.model_list,
'smape':self.smape_list,
'mape':self.mape_list,
'predictions':self.predictions_list},
index = [i+1 for i in range(len(self.model_list))])
return self.summary_table
def find_best_model(self, criteria = 'mape',):
"""
Finds the best model of all the models run
Returns
-------
best_model_stats : pd.DataFrame
Best model name, MAPE, SMAPE, Predictions
"""
if isinstance(self.summary_table, pd.DataFrame):
summary_table = self.summary_table
else:
summary_table = self.construct_summary_table()
min_metric = summary_table[criteria].min()
best_model_stats = summary_table[summary_table[criteria] == min_metric]
self.best_mape = best_model_stats.mape.values[0]
self.best_model = best_model_stats.model_name.values[0]
self.best_predictions = pd.Series(data = best_model_stats.predictions.values[0],
index = self.tso.X_pred.index)
self.best_smape = best_model_stats.smape.values[0]
return best_model_stats
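# Example (sketch) of the intended wiring, assuming `df` is a dataframe that already
# contains a date column, the response and the exogenous drivers, and that the
# column names and the six split dates below are placeholders:
#
#     tso = TimeSeriesObject(df).set_date_column('date').xy_split('sales')
#     tst = Transform(tso)
#     tst.do_boxcox()
#     tst.stationarize_y()
#     tst.stationarize_X()
#     tso.train_test_split(start_train, end_train,
#                          start_test, end_test,
#                          start_pred, end_pred)
#     mdl = Model(tso, tst)
#     summary = mdl.construct_summary_table()   # fits every model above
#     best = mdl.find_best_model(criteria='mape')
#
# find_best_model() returns the summary row with the lowest MAPE (or SMAPE) and
# stores the winning forecast in mdl.best_predictions.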
|
zx-forecasting-framework
|
/zx_forecasting_framework-0.0.1.tar.gz/zx_forecasting_framework-0.0.1/zx_forecasting_framework/model.py
|
model.py
|
from .analyse import Analyse
from .metrics import Metrics
from .model import Model
from .timeSeriesObject import TimeSeriesObject
from .transform import Transform
|
zx-forecasting-framework
|
/zx_forecasting_framework-0.0.1.tar.gz/zx_forecasting_framework-0.0.1/zx_forecasting_framework/__init__.py
|
__init__.py
|
# This is a solution to the OpenMC benchmark😊😊😊
|
zx-openmc
|
/zx_openmc-0.0.4.tar.gz/zx_openmc-0.0.4/README.md
|
README.md
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="zx_openmc",
version="0.0.4",
author="zhangxin",
author_email="[email protected]",
description="This is a solution to the OpenMC benchmark",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/zx2810/zx_openmc",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
zx-openmc
|
/zx_openmc-0.0.4.tar.gz/zx_openmc-0.0.4/setup.py
|
setup.py
|
[](https://travis-ci.org/kosarev/zx)
# zx
ZX Spectrum Emulator written in a mix of Python and C++.

[More screenshots](https://github.com/kosarev/zx/tree/master/screenshots)
### Features
* Designed to be suitable for research and development purposes
such as unattended testing of Spectrum software, timing
analysis, etc.
* Meant to be easy to customize and re-use via Python interfaces.
* Fast and accurate emulation.
* Based on the fast and flexible
[Z80 emulator](https://github.com/kosarev/z80).
### Development status
* General status: working pre-alpha.
* Supported machines: 48K only for now.
* Display: multi-colour effects,
[accurate timings](https://github.com/kosarev/zx/blob/master/test/screen_timing/SCREEN_TIMING.md).
* Sound: not supported yet.
* Tape: TAP and TZX formats supported as well as conversion to WAV.
* Snapshots: Z80.
* Playback recordings: RZX.
### Installation and running
For the latest release:
```shell
$ sudo pip3 install zx
```
Or directly from the repository for current development version:
```shell
$ sudo pip3 install git+https://github.com/kosarev/zx
```
Local development setups are also supported:
```shell
$ git clone --recursive https://github.com/kosarev/zx
$ cd zx
$ python3 setup.py develop --prefix ~/.local
```
Running:
```shell
$ zx
```
### Controls
`F1` displays help.
`F2` saves a snapshot.
`F3` loads a snapshot or tape file.
`F6` pauses/resumes tape.
`F10` and `ESC` quit the emulator.
`F11` and double click switch between fullscreen and windowed mode.
`PAUSE` and mouse click pause/resume emulation or RZX playback.
Any Spectrum keystroke resumes emulation and switches from RZX
playback back to the regular emulation mode.
### Running snapshots, recordings and tapes
```shell
$ zx elven.z80
```
```shell
$ zx exolon.rzx
```
```shell
$ zx https://www.worldofspectrum.org/pub/sinclair/games/e/EricTheFloaters.tzx.zip
```
### Converting files
Supported formats: `.rzx`, `.scr`, `.tap`, `.tzx`, `.wav`,
`.z80`, `.zip`.
```shell
$ zx jack.tzx jack.wav
```
```shell
$ zx eric.tap eric.z80
```
### Dumping files
```shell
$ zx dump rick.z80
OrderedDict([('id', 'z80_snapshot'), ('a', 213), ('f', 66), ...
```
With the `dump` command, **zx** parses the specified file (which can
be in any supported format) and prints it in the form of raw Python data.
### Reference papers
* [Screen timings](https://github.com/kosarev/zx/blob/master/test/screen_timing/SCREEN_TIMING.md)
|
zx
|
/zx-0.7.0.tar.gz/zx-0.7.0/README.md
|
README.md
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import distutils.sysconfig, os
import inspect
# from distutils.core import Extension
from setuptools import Extension, setup
ZX_MAJOR_VERSION = 0
ZX_MINOR_VERSION = 7
ZX_PATCH_VERSION = 0
here = os.path.abspath(os.path.dirname(inspect.getsource(lambda:0)))
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Work around the problem with the warning about '-Wstrict-prototypes'.
# https://bugs.python.org/issue1222585
config_vars = distutils.sysconfig.get_config_vars()
opt_to_remove = '-Wstrict-prototypes'
for var in ['OPT']:
if var in config_vars:
opts = config_vars[var].split()
if opt_to_remove in opts:
opts.remove(opt_to_remove)
config_vars[var] = ' '.join(opts)
zx_emulatorbase_module = Extension(
name='zx._emulatorbase',
define_macros=[('ZX_MAJOR_VERSION', '%d' % ZX_MAJOR_VERSION),
('ZX_MINOR_VERSION', '%d' % ZX_MINOR_VERSION),
('ZX_PATCH_VERSION', '%d' % ZX_PATCH_VERSION)],
extra_compile_args=['-std=c++11', '-Wall', '-fno-exceptions', '-fno-rtti',
'-O3',
'-UNDEBUG', # TODO
],
sources=['zx.cpp', 'zx/_emulatorbase.cpp'],
language='c++')
# TODO: Update the URL once we have a published documentation.
# TODO: Do we have a name for the emulator?
setup(name='zx',
version='%d.%d.%d' % (ZX_MAJOR_VERSION, ZX_MINOR_VERSION,
ZX_PATCH_VERSION),
description='ZX Spectrum Emulator for Researchers and Developers',
long_description=long_description,
long_description_content_type='text/markdown',
author='Ivan Kosarev',
author_email='[email protected]',
url='https://github.com/kosarev/zx/',
ext_modules=[zx_emulatorbase_module],
packages=['zx'],
install_requires=[
'pycairo',
'pygobject',
],
package_data={'zx': ['roms/*']},
entry_points={
'console_scripts': [
'zx = zx:main',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: X11 Applications :: GTK',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: C++',
# TODO: Are we going to support Python 2?
# TODO: Specific versions?
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Games/Entertainment',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Emulators',
],
license='MIT',
# TODO: Respect other parameters.
)
|
zx
|
/zx-0.7.0.tar.gz/zx-0.7.0/setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
# ZX Spectrum Emulator.
# https://github.com/kosarev/zx
#
# Copyright (C) 2017-2019 Ivan Kosarev.
# [email protected]
#
# Published under the MIT license.
import gc, unittest
''' XFAIL
class test_create(unittest.TestCase):
def runTest(self):
import zx
mach = zx.Spectrum48()
del mach
class test_derive(unittest.TestCase):
def runTest(self):
import zx
class speccy(zx.Spectrum48):
pass
mach = speccy()
del mach
class test_get_memory(unittest.TestCase):
def runTest(self):
import zx
mach = zx.Spectrum48()
mem = mach.get_memory()
assert len(mem) == 0x10000
assert mem[0] == 0xf3, '0x%x' % mem[0]
mem[0] += 1
assert mem[0] == 0xf4, '0x%x' % mem[0]
class test_render_frame(unittest.TestCase):
def runTest(self):
import zx
mach = zx.Spectrum48()
data = mach.render_screen()
assert len(data) == 49280
class test_get_frame_pixels(unittest.TestCase):
def runTest(self):
import zx
mach = zx.Spectrum48()
mach.render_screen()
pixels = mach.get_frame_pixels()
assert len(pixels) == 394240
class test_execute_frame(unittest.TestCase):
def runTest(self):
import zx
mach = zx.Spectrum48()
mem = mach.get_memory()
mem[0:2] = bytearray([0x18, 0x100 - 2]) # jr $
mach.run()
class test_keyboard(unittest.TestCase):
def runTest(self):
import zx
info = zx.KEYS_INFO
assert info['3']['number'] == 2
assert info['N']['halfrow_number'] == 7
assert info['H']['pos_in_halfrow'] == 0
assert info['C']['is_leftside'] == True
assert info['E']['is_rightside'] == False
assert info['R']['address_line'] == 10
assert info['5']['port_bit'] == 4
'''
if __name__ == '__main__':
unittest.main()
|
zx
|
/zx-0.7.0.tar.gz/zx-0.7.0/test/test_spectrum48.py
|
test_spectrum48.py
|
# -*- coding: utf-8 -*-
# ZX Spectrum Emulator.
# https://github.com/kosarev/zx
#
# Copyright (C) 2017-2019 Ivan Kosarev.
# [email protected]
#
# Published under the MIT license.
import gc, unittest
''' XFAIL
class test_spectrum48_rom(unittest.TestCase):
def runTest(self):
import zx
rom = zx.get_rom_image('ZX Spectrum 48K')
assert type(rom) is bytes
assert len(rom) == 0x4000
assert rom.startswith(b'\xf3\xaf\x11\xff')
'''
if __name__ == '__main__':
unittest.main()
|
zx
|
/zx-0.7.0.tar.gz/zx-0.7.0/test/test_roms.py
|
test_roms.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ZX Spectrum Emulator.
# https://github.com/kosarev/zx
#
# Copyright (C) 2017-2019 Ivan Kosarev.
# [email protected]
#
# Published under the MIT license.
import sys
class Reg(object):
def __init__(self, name):
self.name = name
class RegPair(object):
def __init__(self, name):
self.name = name
A = Reg('a')
B = Reg('b')
C = Reg('c')
D = Reg('d')
E = Reg('e')
H = Reg('h')
L = Reg('l')
R = Reg('r')
BC = RegPair('bc')
DE = RegPair('de')
HL = RegPair('hl')
class Command:
pass
class Load(Command):
def __init__(self, reg, value):
self.reg = reg
self.value = value
class OutA(Command):
def __init__(self, x, y):
self.beam_pos = x, y
class WriteAtHL(Command):
def __init__(self, reg):
self.reg = reg
class WriteScreenAtHL(Command):
def __init__(self, x, y, reg):
self.beam_pos = x, y
self.reg = reg
class drawing_generator(object):
SCREEN_WIDTH = 256
SCREEN_HEIGHT = 192
TICKS_PER_LINE = 224
PIXELS_PER_TICK = 2
def __init__(self, is_late_timings=False):
self.is_late_timings = is_late_timings
self._lines = []
self._label = 0
self._tick = 38
def add_line(self, line):
# print(line)
self._lines.append(line)
def add_label(self):
n = self._label
self._label += 1
return 'l%d' % n
def add_instr(self, instr, ticks):
if type(ticks) is not list:
ticks = [ticks]
self.add_line(' %-24s; %5d %5s' % (
instr, self._tick, ' + '.join('%d' % x for x in ticks)))
self._tick += sum(ticks)
def generate_load(self, load):
if isinstance(load.reg, RegPair):
self.add_instr('ld %s, 0x%04x' % (load.reg.name, load.value), 10)
else:
self.add_instr('ld %s, 0x%02x' % (load.reg.name, load.value), 7)
def generate_out_a(self, out):
self.move_to_beam_pos(out.beam_pos, ticks_to_reserve=7)
self.add_instr('out (0xfe), a', 11)
def generate_write_at_hl(self, write):
delay = self.get_memory_contention_delay(self._tick + 4)
self.add_instr('ld (hl), %s' % write.reg.name, [7, delay])
def generate_write_screen_at_hl(self, write):
self.move_to_beam_pos(write.beam_pos, ticks_to_reserve=4)
self.generate_write_at_hl(write)
_COMMAND_GENERATORS = {
Load: generate_load,
OutA: generate_out_a,
WriteAtHL: generate_write_at_hl,
WriteScreenAtHL: generate_write_screen_at_hl,
}
def generate_command(self, command):
self._COMMAND_GENERATORS[type(command)](self, command)
def _get_reg_pair_clobbers(self, clobbers):
if B in clobbers and C in clobbers:
yield BC
if D in clobbers and E in clobbers:
yield DE
if H in clobbers and L in clobbers:
yield HL
# TODO: Test and optimize.
def generate_delay(self, delay, clobbers=[], step=0):
while delay:
# print('delay:', delay)
if (delay < 4 or delay == 5) and step:
delay += step
continue
if delay == 4:
self.add_instr('nop', 4)
break
if delay == 6:
done = False
for rp in self._get_reg_pair_clobbers(clobbers):
self.add_instr('inc %s' % rp.name, 6)
done = True
break
if done:
break
if step:
delay += step
continue
assert 0, ('No register pair to clobber to generate a '
'delay of %d ticks!' % delay)
if delay == 7:
self.add_instr('or 0', 7)
break
if delay == 9:
# TODO: Other options are:
# ld a, r
# ld a, i
# ld i, a
if R in clobbers:
self.add_instr('ld r, a', 9)
break
elif step:
delay += step
continue
else:
assert 0, ('Need to clobber the register R to generate a '
'delay of %d ticks!' % delay)
if delay == 10:
                label = self.add_label()
self.add_instr('jp %s' % label, 10)
self.add_line(label + ':')
break
if delay == 13:
# TODO: 13 = 4 + 9, where the 9 needs a clobber.
assert 0
NON_CLOBBERING_PATTERNS = {
# TODO: Add the missing values.
8: [4, 4],
11: [4, 7],
12: [4, 4, 4],
14: [7, 7],
15: [4, 4, 7],
16: [4, 4, 4, 4],
17: [7, 10],
18: [4, 7, 7],
19: [4, 4, 4, 7],
20: [4, 4, 4, 4, 4],
21: [7, 7, 7],
22: [4, 4, 7, 7],
23: [4, 4, 4, 4, 7],
24: [4, 4, 4, 4, 4, 4],
25: [4, 7, 7, 7],
26: [4, 4, 4, 7, 7],
27: [4, 4, 4, 4, 4, 7],
28: [4, 4, 4, 4, 4, 4, 4],
29: [4, 4, 7, 7, 7],
30: [4, 4, 4, 4, 7, 7],
31: [4, 4, 4, 4, 4, 4, 7],
}
if delay in NON_CLOBBERING_PATTERNS:
pattern = NON_CLOBBERING_PATTERNS[delay]
assert sum(pattern) == delay
for d in pattern:
self.generate_delay(d)
break
# TODO: Use djnz loops when they are a good fit.
LOOP_THRESHOLD = 2 * 16 + 7 - 5
if delay >= LOOP_THRESHOLD:
assert clobbers, 'No clobber register for a delay loop!'
clobber = clobbers[0]
n = (delay - 7 + 5) // 16 - 1
n = min(n, 0xff) # TODO: Support longer loops.
assert n > 0
self.add_instr('ld %s, %d' % (clobber.name, n), 7)
delay -= 7
label = self.add_label()
ticks = n * 16 - 5
self.add_line('%-28s; %5d %5d' % (
label + ':', self._tick, ticks))
self.add_line(' %-24s; %5d' % (
'dec %s' % clobber.name, 4))
self.add_line(' %-24s; %5s' % (
'jr nz, %s' % label, '7 + 5'))
self._tick += ticks
delay -= ticks
# Make sure the remaining delay won't cause us
# trouble.
assert (delay >= LOOP_THRESHOLD or
delay in NON_CLOBBERING_PATTERNS), delay
continue
assert 0, "Don't know how to generate a delay of %d ticks!" % delay
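    # A couple of illustrative cases for the above: generate_delay(4) emits a
    # single 'nop', and generate_delay(15) expands via the non-clobbering
    # pattern [4, 4, 7] into two 'nop's followed by 'or 0'. A delay of 6 needs
    # a clobberable register pair and a delay of 9 needs the R register
    # (unless a step is allowed), as the assertions above enforce.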
def move_to_tick(self, tick, clobbers=[], step=0):
assert tick >= self._tick
self.generate_delay(tick - self._tick, clobbers, step)
def align_tick(self, div, rem):
aligned_tick = (self._tick + (div - rem - 1)) // div * div + rem
self.move_to_tick(aligned_tick, clobbers=[A, B, C, R], step=div)
def align_end_tick(self):
self.align_tick(4, 1)
def move_to_beam_pos(self, pos, ticks_to_reserve):
BASE_TICK = 64 * self.TICKS_PER_LINE + 8 // 2
if self.is_late_timings:
BASE_TICK += 1
x, y = pos
target_tick = (BASE_TICK + y * self.TICKS_PER_LINE + x // 2 -
ticks_to_reserve)
self.move_to_tick(target_tick, clobbers=[B])
def get_memory_contention_delay(self, tick):
# TODO: We sample ~INT during the last tick of the
# previous instruction, so we add 1 to the contention
# base to compensate that.
CONT_BASE = 14335 + 1
if self.is_late_timings:
CONT_BASE += 1
if tick < CONT_BASE:
return 0
if tick >= CONT_BASE + self.SCREEN_HEIGHT * self.TICKS_PER_LINE:
return 0
ticks_since_new_line = (tick - CONT_BASE) % self.TICKS_PER_LINE
if ticks_since_new_line >= self.SCREEN_WIDTH / self.PIXELS_PER_TICK:
return 0
ticks_since_new_ula_cycle = ticks_since_new_line % 8
delay = (0 if ticks_since_new_ula_cycle == 7 else
6 - ticks_since_new_ula_cycle)
return delay
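    # Worked example for the above: with early timings CONT_BASE is 14336, so
    # a tick of 14340 falls 4 ticks into the first screen line; 4 is within
    # the 128 displayed ticks per line and 4 % 8 == 4, giving a delay of
    # 6 - 4 = 2 ticks. Ticks outside the displayed area return 0.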
def generate(self, *commands):
for command in commands:
self.generate_command(command)
self.align_end_tick()
self.add_line(' %-24s; %5d' % ('', self._tick))
def emit_source(self):
for line in self._lines:
print(line)
is_late_timings = len(sys.argv) > 1 and sys.argv[1] == 'late_timings'
g = drawing_generator(is_late_timings)
g.generate(
# Let the border be (mostly) yellow.
Load(A, 6), OutA(0, -60),
# Use the spare time to prepare the screen and attributes areas.
Load(A, 0x00),
Load(HL, 0x4000), WriteAtHL(A),
Load(A, 0xff),
Load(HL, 0x4001), WriteAtHL(A),
Load(HL, 0x4002), WriteAtHL(A),
Load(HL, 0x4100), WriteAtHL(A),
Load(HL, 0x4101), WriteAtHL(A),
Load(HL, 0x4102), WriteAtHL(A),
Load(HL, 0x4200), WriteAtHL(A),
Load(A, 0x00),
Load(HL, 0x4201), WriteAtHL(A),
Load(A, 0xff),
Load(HL, 0x4202), WriteAtHL(A),
Load(HL, 0x4300), WriteAtHL(A),
Load(HL, 0x4301), WriteAtHL(A),
Load(HL, 0x4302), WriteAtHL(A),
Load(HL, 0x4400), WriteAtHL(A),
Load(HL, 0x4401), WriteAtHL(A),
Load(A, 0x00),
Load(HL, 0x4402), WriteAtHL(A),
Load(A, 0xff),
Load(HL, 0x4500), WriteAtHL(A),
Load(HL, 0x4501), WriteAtHL(A),
Load(HL, 0x4502), WriteAtHL(A),
Load(A, 0xff),
Load(HL, 0x5820), WriteAtHL(A),
Load(HL, 0x5841), WriteAtHL(A),
Load(HL, 0x5862), WriteAtHL(A),
# Draw eight colour lines, each line starting one tick later
# and via that make the moment when the border value is
# latched be visible.
Load(A, 0), OutA(-16, -16),
Load(A, 5), OutA(-14, -14),
Load(A, 2), OutA(-12, -12),
Load(A, 4), OutA(-10, -10),
# This black line starts one chunk later.
Load(A, 0), OutA(-8, -8),
Load(A, 5), OutA(-6, -6),
Load(A, 2), OutA(-4, -4),
Load(A, 4), OutA(-2, -2),
# Continue the frame with yellow border again.
Load(A, 0), OutA(256 - 2, -1),
Load(A, 6), OutA(256 + 64, -1),
# This write is early enough to clear the chunk of pixels
# before it is latched.
Load(A, 0xff),
Load(HL, 0x4000), WriteScreenAtHL(-10, 0, A),
# But this one is too late.
Load(A, 0x00),
Load(HL, 0x4100), WriteScreenAtHL(-8, 1, A),
# Similarly, for the second chunk in line, this is early
# enough to clear it.
Load(A, 0xff),
Load(HL, 0x4201), WriteScreenAtHL(-10, 2, A),
# But this is again too late. Meaning both the adjacent
# chunks are latched during the same ULA delay.
Load(A, 0x00),
Load(HL, 0x4301), WriteScreenAtHL(-8, 3, A),
# Now let's see when the third chunk is latched so we know
# the length of the 16-pixel cycles.
Load(A, 0xff),
Load(HL, 0x4402), WriteScreenAtHL(6, 4, A),
Load(A, 0x00),
Load(HL, 0x4502), WriteScreenAtHL(8, 5, A),
# Now write some attribute bytes. This write is early enough
# to colour the attribute square with black before it's
# latched the first time.
Load(A, 0x00),
Load(HL, 0x5820), WriteScreenAtHL(-10, 8, A),
# This write is too late, so it should remain invisible.
Load(A, 0x6d),
WriteScreenAtHL(-8, 12, A),
# Make sure the rest of the square is black.
Load(A, 0x00),
WriteScreenAtHL(128, 12, A),
# Do the same with the 2nd attribute byte in line.
Load(A, 0x00),
Load(HL, 0x5841), WriteScreenAtHL(-10, 16, A),
Load(A, 0x6d),
WriteScreenAtHL(-8, 20, A),
Load(A, 0x00),
WriteScreenAtHL(128, 20, A),
# Do it once again for the 3rd attribute byte in line.
Load(A, 0x00),
Load(HL, 0x5862), WriteScreenAtHL(6, 24, A),
Load(A, 0x6d),
WriteScreenAtHL(8, 28, A),
Load(A, 0x00),
WriteScreenAtHL(128, 28, A),
)
g.emit_source()
|
zx
|
/zx-0.7.0.tar.gz/zx-0.7.0/test/screen_timing/generate_drawing.py
|
generate_drawing.py
|
def print_lol(the_list):
for each_movie in the_list:
if isinstance(each_movie,list):
print_lol(each_movie)
else:
print(each_movie)
|
zx1995_nester
|
/zx1995_nester-1.0.0.zip/zx1995_nester-1.0.0/nester.py
|
nester.py
|
from distutils.core import setup
setup(
name = 'zx1995_nester',
version = '1.0.0',
py_modules = ['nester'],
author = 'hypython',
author_email = '[email protected]',
)
|
zx1995_nester
|
/zx1995_nester-1.0.0.zip/zx1995_nester-1.0.0/setup.py
|
setup.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General Public License
#
# This is the Parser for the ZXBASM (ZXBasic Assembler)
# ----------------------------------------------------------------------
import sys
import libzxbasm
if __name__ == '__main__':
sys.exit(libzxbasm.main())
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/zxbasm.py
|
zxbasm.py
|
[Build Status](https://travis-ci.com/boriel/zxbasic)
[License](./LICENSE.txt)
[PyPI](https://pypi.python.org/pypi/zxbasic)
ZX BASIC
--------
Copyleft (K) 2008, Jose Rodriguez-Rosa (a.k.a. Boriel) <http://www.boriel.com>
All files in this project are covered under the [GPLv3 LICENSE](http://www.gnu.org/licenses/gpl.html)
except those placed in directories `library/` and `library-asm`.
Those are licensed under the [MIT license](https://en.wikipedia.org/wiki/MIT_License) unless otherwise
specified in the files themselves. In any case, all of the licenses for files under
those directories allow closed-source (i.e. commercial) binary distribution of the
files created with this compiler.
You can create closed-source programs (even commercial ones) with this compiler
(a mention of this tool is welcome, though), but you are not allowed to release
the compiler itself as a closed-source program.
If you modify *this* project (the compiler .py or anything licensed as GPLv3)
in any way you MUST publish the changes you made and submit your contribution
to the community under the same license.
-------------------------
DOCUMENTATION
-------------
This is a very short help file.
- For DOCUMENTATION in English, go to the [ZX BASIC docs](https://zxbasic.readthedocs.io/en/latest/).
- For help, support and updates, meet the community at the [forum](https://www.boriel.com/forum).
INSTALLATION
------------
Go to the [ZXBasic download page](https://zxbasic.readthedocs.io/en/latest/archive/)
and get the version most suitable for you.
There are, basically, two flavors (both with identical capabilities):
- For Windows you can download the win32 executable (Windows .exe zip package) version.
To install, just uncompress it in a directory of your choice.
The main executable is `zxb.exe` (more on this later). This toolchain
also includes `zxbasm.exe` (the assembler) and `zxbpp.exe` (the preprocessor), but these
are not needed when programming in BASIC.
- For Linux and Mac OSX there is a python version, so you will need a python
interpreter (available on many platforms, and usually already installed in Linux and Mac OSX).
Just uncompress it in a directory of your choice and installation is done. :-)
The main executables are `zxb.py` (the compiler), `zxbasm.py` (the assembler) and `zxbpp.py` (the preprocessor).
You can also use this version on Windows, but you will need to install a Python interpreter first.
##### Examples
Example screenshots (images omitted):
- An in-game screenshot of _Eleuterio_ by @*na_th_an*
- An in-game screenshot of _El Hobbit_ by @*Wilco2000*
- An in-game screenshot of _Knight and Demonds DX_ by Einar Saukas
See more examples at the [Released Programs](https://zxbasic.readthedocs.io/en/latest/released_programs/) page.
QUICK START
-----------
For a quick start, just open a terminal on your PC in the directory where you uncompressed ZX Basic
and type `zxb` (on Windows) or `zxb.py` (OSX, Linux). You should see a zxbasic usage message like this:
```
usage: zxb [-h] [-d] [-O OPTIMIZE] [-o OUTPUT_FILE] [-T] [-t] [-B] [-a] [-A]
[-S ORG] [-e STDERR] [--array-base ARRAY_BASE]
[--string-base STRING_BASE] [-Z] [-H HEAP_SIZE] [--debug-memory]
[--debug-array] [--strict-bool] [--enable-break] [-E] [--explicit]
[-D DEFINES] [-M MEMORY_MAP] [-i] [-I INCLUDE_PATH] [--strict]
[--version]
PROGRAM
zxb: error: the following arguments are required: PROGRAM
```
Create a text file with the following content:
~~~~
10 CLS
20 PRINT "HELLO WORLD!"
~~~~
Save it as `hello.bas` and finally compile it with:
~~~~
zxb -taB hello.bas
~~~~
If everything went well, a file named `hello.tap` should be created.
Open it with your favourite emulator (e.g. Fuse) and see the result.
Congratulations! You're now ready to create compiled BASIC programs for
your machine. Check and compile the examples included in the examples/ folder
or go to the [documentation page](https://zxbasic.readthedocs.io/en/latest/) for further info.
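If you prefer to drive the compiler from a script, the same compilation can also be
launched through Python's standard `subprocess` module. This is only a minimal sketch;
it assumes `zxb.py` (or `zxb.exe` on Windows) is reachable on your PATH and that
`hello.bas` sits in the current directory:
```
import subprocess

# Compile hello.bas into hello.tap, exactly as the `zxb -taB hello.bas`
# command above does. Adjust the executable name/path to your installation.
subprocess.run(["zxb.py", "-taB", "hello.bas"], check=True)
```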
ACKNOWLEDGEMENTS
---------------
These are some of the people who have contributed in one way or another. I consider
some of them (Britlion, LCD) to be co-authors of this project.
Thanks to:
* Andre Adrian [adrianandre AT compuserve.de], from whom I ripped the 32-bit
Z80 MULT and DIV routines.
See: http://www.andreadrian.de/oldcpu/Z80_number_cruncher.html
* Matthew Wilson [matthew AT mjwilson.demon.co.uk] and
Andy [fract AT zx-81.co.uk] from comp.sys.sinclair for their help on ROM FP-CALC usage.
* [Mulder](http://www.worldofspectrum.org/forums/member.php?u=1369) from World Of Spectrum
for finding the nasty PRINT AT bug and the GTU8 bug.
See: http://www.worldofspectrum.org/forums/showthread.php?p=278416&posted=1#post278416
* [Compiuter](http://www.speccy.org/foro/memberlist.php?mode=viewprofile&u=73) from
Speccy.org for finding a bug in PRINT OVER 1 routine.
* [Britlion](https://www.boriel.com/forum/member.php?action=profile&uid=129)
for his HUGE contribution (both in optimizations, ideas and libraries).
* [LCD](http://members.inode.at/838331/index.html)
Author of the [BorIDE](http://members.inode.at/838331/pc/BorIDE%20v0.5%20(Version%20from%2003th%20March%202013).zip), who has also made many contributions to the project.
* There are several more contributors (thanks to them for their intensive testing!). And thank you all
(the entire community) for your interest!
If you have contributed to this project in some way, please tell me so I can add you to this list.
------
[Ko-fi](https://ko-fi.com/H2H81J0OU)
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/README.md
|
README.md
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: ts=4:sw=4:et:
import sys
import libzxbc
if __name__ == '__main__':
print('-' * 48 + '\n* WARNING: zxb is deprecated! Use zxbc instead *\n' + '-' * 48, file=sys.stderr)
sys.exit(libzxbc.main()) # Exit
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/zxb.py
|
zxb.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: ts=4:sw=4:et:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General Public License
#
# This is the Parser for the ZXBpp (ZXBasic Preprocessor)
# ----------------------------------------------------------------------
import sys
import libzxbpp
if __name__ == '__main__':
sys.exit(libzxbpp.entry_point())
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/zxbpp.py
|
zxbpp.py
|
# -*- coding: utf-8 -*-
import pathlib
from setuptools import setup
packages = [
'api',
'arch',
'arch.zx48k',
'arch.zx48k.backend',
'arch.zx48k.optimizer',
'arch.zx48k.peephole',
'ast_',
'libzxbasm',
'libzxbc',
'libzxbpp',
'libzxbpp.prepro',
'outfmt',
'parsetab',
'ply',
'symbols'
]
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
package_data = {'': ['*'], 'arch.zx48k.peephole': ['opts/*']}
entry_points = {
'console_scripts': ['zxb = libzxbc.zxb:main',
'zxbasm = libzxbasm.zxbasm:main',
'zxbc = libzxbc.zxb:main',
'zxbpp = libzxbpp.zxbpp:entry_point']
}
setup_kwargs = {
'name': 'zxbasic',
'version': '1.12.0',
'description': "Boriel's ZX BASIC Compiler",
'classifiers': [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Affero General Public License v3',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.8',
],
'long_description_content_type': "text/markdown",
'long_description': README,
'author': 'Jose Rodriguez',
'author_email': '[email protected]',
'maintainer': None,
'maintainer_email': None,
'url': 'http://zxbasic.net',
'packages': packages,
'package_data': package_data,
'entry_points': entry_points,
'python_requires': '>=3.6,<4.0',
}
setup(**setup_kwargs)
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/setup.py
|
setup.py
|
# PLY (Python Lex-Yacc)
Copyright (C) 2001-2020
David M. Beazley (Dabeaz LLC)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the David Beazley or Dabeaz LLC may be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Introduction
============
PLY is a 100% Python implementation of the common parsing tools lex
and yacc. Here are a few highlights:
- PLY is very closely modeled after traditional lex/yacc.
If you know how to use these tools in C, you will find PLY
to be similar.
- PLY provides *very* extensive error reporting and diagnostic
information to assist in parser construction. The original
implementation was developed for instructional purposes. As
a result, the system tries to identify the most common types
of errors made by novice users.
- PLY provides full support for empty productions, error recovery,
precedence specifiers, and moderately ambiguous grammars.
- Parsing is based on LR-parsing which is fast, memory efficient,
better suited to large grammars, and which has a number of nice
properties when dealing with syntax errors and other parsing problems.
Currently, PLY builds its parsing tables using the LALR(1)
algorithm used in yacc.
- PLY uses Python introspection features to build lexers and parsers.
This greatly simplifies the task of parser construction since it reduces
the number of files and eliminates the need to run a separate lex/yacc
tool before running your program.
- PLY can be used to build parsers for "real" programming languages.
Although it is not ultra-fast due to its Python implementation,
PLY can be used to parse grammars consisting of several hundred
rules (as might be found for a language like C). The lexer and LR
parser are also reasonably efficient when parsing typically
sized programs. People have used PLY to build parsers for
C, C++, ADA, and other real programming languages.
How to Use
==========
PLY consists of two files: lex.py and yacc.py. These are contained
within the `ply` directory which may also be used as a Python package.
To use PLY, simply copy the `ply` directory to your project and import
lex and yacc from the associated `ply` package. For example:
```python
from .ply import lex
from .ply import yacc
```
Alternatively, you can copy just the files lex.py and yacc.py
individually and use them as modules however you see fit. For example:
```python
import lex
import yacc
```
If you wish, you can use the install.py script to install PLY into
a virtual environment.
PLY has no third-party dependencies.
The docs/ directory contains complete documentation on how to use
the system. Documentation is available at https://ply.readthedocs.io
The example directory contains several different examples including a
PLY specification for ANSI C as given in K&R 2nd Ed.
A simple example is found at the end of this document.
Requirements
============
PLY requires the use of Python 3.6 or greater. However, you should
use the latest Python release if possible. It should work on just
about any platform.
Note: PLY does not support execution under `python -OO`. It can be
made to work in that mode, but you'll need to change the programming
interface with a decorator. See the documentation for details.
Resources
=========
Official Documentation is available at:
* https://ply.readthedocs.io
More information about PLY can be obtained on the PLY webpage at:
* http://www.dabeaz.com/ply
For a detailed overview of parsing theory, consult the excellent
book "Compilers : Principles, Techniques, and Tools" by Aho, Sethi, and
Ullman. The topics found in "Lex & Yacc" by Levine, Mason, and Brown
may also be useful.
The GitHub page for PLY can be found at:
* https://github.com/dabeaz/ply
Acknowledgments
===============
A special thanks is in order for all of the students in CS326 who
suffered through about 25 different versions of these tools :-).
The CHANGES file acknowledges those who have contributed patches.
Elias Ioup did the first implementation of LALR(1) parsing in PLY-1.x.
Andrew Waters and Markus Schoepflin were instrumental in reporting bugs
and testing a revised LALR(1) implementation for PLY-2.0.
Example
=======
Here is a simple example showing a PLY implementation of a calculator
with variables.
```python
# -----------------------------------------------------------------------------
# calc.py
#
# A simple calculator with variables.
# -----------------------------------------------------------------------------
tokens = (
'NAME','NUMBER',
'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
'LPAREN','RPAREN',
)
# Tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
# Ignored characters
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(t):
print(f"Illegal character {t.value[0]!r}")
t.lexer.skip(1)
# Build the lexer
import ply.lex as lex
lex.lex()
# Precedence rules for the arithmetic operators
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names (for storing variables)
names = { }
def p_statement_assign(p):
'statement : NAME EQUALS expression'
names[p[1]] = p[3]
def p_statement_expr(p):
'statement : expression'
print(p[1])
def p_expression_binop(p):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if p[2] == '+' : p[0] = p[1] + p[3]
elif p[2] == '-': p[0] = p[1] - p[3]
elif p[2] == '*': p[0] = p[1] * p[3]
elif p[2] == '/': p[0] = p[1] / p[3]
def p_expression_uminus(p):
'expression : MINUS expression %prec UMINUS'
p[0] = -p[2]
def p_expression_group(p):
'expression : LPAREN expression RPAREN'
p[0] = p[2]
def p_expression_number(p):
'expression : NUMBER'
p[0] = p[1]
def p_expression_name(p):
'expression : NAME'
try:
p[0] = names[p[1]]
except LookupError:
print(f"Undefined name {p[1]!r}")
p[0] = 0
def p_error(p):
print(f"Syntax error at {p.value!r}")
import ply.yacc as yacc
yacc.yacc()
while True:
try:
s = input('calc > ')
except EOFError:
break
yacc.parse(s)
```
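An illustrative session with this grammar (the output follows directly from the
`print` calls in the rules above, with `*` binding tighter than `+` thanks to the
precedence table):
```
calc > x = 3
calc > x * 4 + 2
14
calc > y
Undefined name 'y'
0
```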
Bug Reports and Patches
=======================
My goal with PLY is to simply have a decent lex/yacc implementation
for Python. As a general rule, I don't spend huge amounts of time
working on it unless I receive very specific bug reports and/or
patches to fix problems. At this time, PLY is mature software and new
features are no longer being added. If you think you have found a
bug, please visit the PLY Github page at https://github.com/dabeaz/ply
to report an issue.
Take a Class!
=============
If you'd like to learn more about compiler principles and have a go at
implementing a compiler, come take a course.
https://www.dabeaz.com/compiler.html.
-- Dave
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/ply/README.md
|
README.md
|
# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2020
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Latest version: https://github.com/dabeaz/ply
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization, often at the expense of readability and what might be
# considered good Python "coding style."  Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
import re
import types
import sys
import inspect
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = False             # Debugging mode. If set, yacc generates a
                              # 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
resultlimit = 40 # Size limit of results when running in debug mode.
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception):
pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit] + ' ...'
result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return '<%s @ 0x%x>' % (type(r).__name__, id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self):
return self.type
def __repr__(self):
return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self, s, stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser = None
def __getitem__(self, n):
if isinstance(n, slice):
return [s.value for s in self.slice[n]]
elif n >= 0:
return self.slice[n].value
else:
return self.stack[n].value
def __setitem__(self, n, v):
self.slice[n].value = v
def __getslice__(self, i, j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self, n):
return getattr(self.slice[n], 'lineno', 0)
def set_lineno(self, n, lineno):
self.slice[n].lineno = lineno
def linespan(self, n):
startline = getattr(self.slice[n], 'lineno', 0)
endline = getattr(self.slice[n], 'endlineno', startline)
return startline, endline
def lexpos(self, n):
return getattr(self.slice[n], 'lexpos', 0)
def set_lexpos(self, n, lexpos):
self.slice[n].lexpos = lexpos
def lexspan(self, n):
startpos = getattr(self.slice[n], 'lexpos', 0)
endpos = getattr(self.slice[n], 'endlexpos', startpos)
return startpos, endpos
def error(self):
raise SyntaxError
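# For illustration, a grammar rule sees this object as its single argument:
#
#     def p_expression_binop(p):
#         'expression : expression PLUS expression'
#         p[0] = p[1] + p[3]       # sets the .value of the resulting symbol
#         line = p.lineno(2)       # line number recorded for PLUS (0 if unset)
#
# where 'p' is a YaccProduction wrapping the matched symbols.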
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self, lrtab, errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
self.set_defaulted_states()
self.errorok = True
def errok(self):
self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
    # For such states, the parser can choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
def disable_defaulted_states(self):
self.defaulted_states = {}
# parse().
#
# This is the core parsing engine. To operate, it requires a lexer object.
# Two options are provided. The debug flag turns on debugging so that you can
# see the various rule reductions and parsing steps. tracking turns on position
# tracking. In this mode, symbols will record the starting/ending line number and
# character index.
def parse(self, input=None, lexer=None, debug=False, tracking=False):
# If debugging has been specified as a flag, turn it into a logging object
if isinstance(debug, int) and debug:
debug = PlyLogger(sys.stderr)
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
if debug:
debug.info('PLY: PARSE DEBUG START')
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
# Set the token function
get_token = self.token = lexer.token
# Set up the state and symbol stacks
statestack = self.statestack = [] # Stack of parsing states
symstack = self.symstack = [] # Stack of grammar symbols
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if debug:
debug.debug('State : %s', state)
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if debug:
debug.debug('Defaulted state %s: Reduce using %d', state, -t)
if debug:
debug.debug('Stack : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
if debug:
debug.debug('Action : Shift and goto state %s', t)
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if debug:
if plen:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
'['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
goto[statestack[-1-plen]][pname])
else:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
goto[statestack[-1]][pname])
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
if debug:
debug.info('Result : %s', format_result(pslice[0]))
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
else:
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
if debug:
debug.info('Result : %s', format_result(pslice[0]))
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
if debug:
debug.info('Done : Returning %s', format_result(result))
debug.info('PLY: PARSE DEBUG END')
return result
if t is None:
if debug:
debug.error('Error : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = self.errorfunc(errtoken)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
statestack.pop()
state = statestack[-1]
continue
            # If we're here, something really bad happened
raise RuntimeError('yacc: internal parser error!!!\n')
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = []
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
self.str = '%s -> <empty>' % self.name
def __str__(self):
return self.str
def __repr__(self):
return 'Production(' + str(self) + ')'
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self, index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self, n):
if n > len(self.prod):
return None
p = LRItem(self, n)
# Precompute the list of productions immediately following.
try:
p.lr_after = self.Prodnames[p.prod[n+1]]
except (IndexError, KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here are
# the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next     - Next LR item. For example, if we are at 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self, p, n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = {}
self.prod.insert(n, '.')
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
s = '%s -> <empty>' % self.name
return s
def __repr__(self):
return 'LRItem(' + str(self) + ')'
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
pass
class Grammar(object):
def __init__(self, terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = {} # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = {} # A dictionary of precomputed FIRST(x) symbols
self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
        self.UsedPrecedence = set()     # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self, index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most non-terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
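    # Illustrative call: add_production('expr', ['expr', 'PLUS', 'term']), with
    # 'term' a nonterminal, registers the rule "expr -> expr PLUS term"; since it
    # carries no %prec marker, its precedence comes from the rightmost terminal
    # (here PLUS) if one was declared via set_precedence(), and defaults to
    # ('right', 0) otherwise.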
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError('start symbol %s undefined' % start)
self.Productions[0] = Production(0, "S'", [start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
    # Find all symbols that were used in the grammar, but not defined as tokens or
    # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
    # During execution of compute_first(), the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
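    # -------------------------------------------------------------------------
    # Worked example (hypothetical grammar), assuming First has already been
    # filled in by compute_first() below:
    #
    #       S : A B
    #       A : LPAREN A RPAREN
    #       A : <empty>
    #       B : NAME
    #
    #   First[A] = ['LPAREN', '<empty>']      First[B] = ['NAME']
    #
    #   _first(('A', 'B')) -> ['LPAREN', 'NAME']
    #       A may derive empty, so First[B] is folded in; B cannot derive
    #       empty, so '<empty>' is not added.
    #   _first(('A',))     -> ['LPAREN', '<empty>']
    # -------------------------------------------------------------------------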
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
                            # Add elements of Follow(p.name) to Follow(B)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
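    # ---------------------------------------------------------------------
    # Worked example, continuing the hypothetical grammar used above for
    # _first() (S : A B ;  A : LPAREN A RPAREN | <empty> ;  B : NAME) with
    # start symbol S:
    #
    #   Follow[S] = ['$end']
    #   Follow[A] = ['NAME', 'RPAREN']   # NAME from "S : A B", RPAREN from
    #                                    # "A : LPAREN A RPAREN"
    #   Follow[B] = ['$end']             # B ends "S : A B", so it inherits
    #                                    # Follow[S]
    # ---------------------------------------------------------------------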
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while True:
if i > len(p):
lri = None
else:
lri = LRItem(p, i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError, KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri:
break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
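# -----------------------------------------------------------------------------
# Illustrative sketch (hypothetical inputs) of how digraph() behaves.  The
# relation R and base function FP below are made up; the real callers are
# compute_read_sets() and compute_follow_sets() in LRTable.
#
#     X  = ['a', 'b', 'c']
#     R  = lambda x: {'a': ['b'], 'b': ['c'], 'c': []}[x]    # edges x R y
#     FP = lambda x: [x.upper()]                             # F'(x)
#
#     digraph(X, R, FP)
#     # -> {'a': ['A', 'B', 'C'], 'b': ['B', 'C'], 'c': ['C']}
#
# i.e. F(x) collects F'(x) plus F(y) for every y reachable from x, which is
# exactly the fixed point F(x) = F'(x) U U{F(y) | x R y} described above.
# -----------------------------------------------------------------------------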
class LALRError(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods.
# -----------------------------------------------------------------------------
class LRTable:
def __init__(self, grammar, log=None):
self.grammar = grammar
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
# Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = True
while didadd:
didadd = False
for j in J:
for x in j.lr_after:
if getattr(x, 'lr0_added', 0) == self._add_count:
continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = True
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I), x))
if g:
return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x)
if not s:
s = {}
self.lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n))
if not s1:
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end')
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I), x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
        # Loop over the items in C and each grammar symbol
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = {}
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I, x)
if not g or id(g) in self.lr0_cidhash:
continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
    # The method used here is due to DeRemer and Pennello (1982).
    #
    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
    # Creates a set containing all of the non-terminals that might produce
    # an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
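    # -----------------------------------------------------------------------------
    # Worked example (hypothetical rules):
    #
    #       args     : args COMMA NAME
    #       args     : NAME
    #       opt_args : args
    #       opt_args :                      <- empty production (p.len == 0)
    #
    # 'opt_args' is added on the first pass because of its empty production;
    # 'args' is never nullable because each of its productions contains a
    # terminal.  The returned set is {'opt_args'}.
    # -----------------------------------------------------------------------------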
# -----------------------------------------------------------------------------
    # find_nonterminal_transitions(C)
    #
    # Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state, N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N:
continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j, t) in dtrans:
                        # Yes. Okay, there is some chance that this is an includes
                        # relation; the only way to know for certain is whether the
                        # rest of the production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals:
break # No forget it
if p.prod[li] not in nullable:
break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j, t))
g = self.lr0_goto(C[j], t) # Go to next set
j = self.lr0_cidhash.get(id(g), -1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name:
continue
if r.len != p.len:
continue
i = 0
                # This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]:
break
i = i + 1
else:
lookb.append((j, r))
for i in includes:
if i not in includedict:
includedict[i] = []
includedict[i].append((state, N))
lookdict[(state, N)] = lookb
return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
FP = lambda x: self.dr_relation(C, x, nullable)
R = lambda x: self.reads_relation(C, x, nullable)
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x, [])
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
for trans, lb in lookbacks.items():
# Loop over productions in lookback
for state, p in lb:
if state not in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans, [])
for a in f:
if a not in p.lookaheads[state]:
p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C, trans, nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C, trans, nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans, readsets, included)
# Add all of the lookaheads
self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = {} # Action production array (temporary)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [] # List of actions
st_action = {}
st_actionp = {}
st_goto = {}
log.info('')
log.info('state %d', st)
log.info('')
for p in I:
log.info(' (%d) %s', p.number, p)
log.info('')
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action['$end'] = 0
st_actionp['$end'] = p
else:
# We are at the end of a production. Reduce!
laheads = p.lookaheads[st]
for a in laheads:
actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from rule being reduced (p)
rprec, rlevel = Productions[p.number].prec
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp, rejectp = pp, oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp, rejectp = oldp, pp
self.rr_conflicts.append((st, chosenp, rejectp))
log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
a, st_actionp[a].number, st_actionp[a])
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I, a)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
# We are in a shift state
actlist.append((a, p, 'shift and go to state %d' % j))
r = st_action.get(a)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError('Shift/shift conflict in state %d' % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from the rule that could have been reduced
rprec, rlevel = Productions[st_actionp[a].number].prec
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = {}
for a, p, m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(' %-15s %s', a, m)
_actprint[(a, m)] = 1
log.info('')
# Print the actions that were not used. (debugging)
not_used = 0
for a, p, m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a, m) in _actprint:
log.debug(' ! %-15s [ %s ]', a, m)
not_used = 1
_actprint[(a, m)] = 1
if not_used:
log.debug('')
# Construct the goto table for this state
nkeys = {}
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I, n)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
st_goto[n] = j
log.info(' %-30s shift and go to state %d', n, j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p:
continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
grammar.append((file, dline, prodname, syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
return grammar
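# -----------------------------------------------------------------------------
# Illustrative example (hypothetical rule): a p_ function docstring such as
#
#       expression : expression PLUS term
#                  | term
#
# is turned into one tuple per alternative, roughly
#
#       [(file, line+1, 'expression', ['expression', 'PLUS', 'term']),
#        (file, line+2, 'expression', ['term'])]
#
# where a '|' line reuses the production name from the line above it.
# -----------------------------------------------------------------------------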
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
parts = []
try:
if self.start:
parts.append(self.start)
if self.prec:
parts.append(''.join([''.join(p) for p in self.prec]))
if self.tokens:
parts.append(' '.join(self.tokens))
for f in self.pfuncs:
if f[3]:
parts.append(f[3])
except (TypeError, ValueError):
pass
return ''.join(parts)
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
continue
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start, str):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func, types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = True
return
eline = self.error_func.__code__.co_firstlineno
efile = self.error_func.__code__.co_filename
module = inspect.getmodule(self.error_func)
self.modules.add(module)
argcount = self.error_func.__code__.co_argcount - ismethod
if argcount != 1:
self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
self.error = True
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get('tokens')
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = sorted(tokens)
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = True
return
terminals = set()
for n in self.tokens:
if n in terminals:
self.log.warning('Token %r multiply defined', n)
terminals.add(n)
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get('precedence')
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec, (list, tuple)):
self.log.error('precedence must be a list or tuple')
self.error = True
return
for level, p in enumerate(self.prec):
if not isinstance(p, (list, tuple)):
self.log.error('Bad precedence table')
self.error = True
return
if len(p) < 2:
self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
self.error = True
return
assoc = p[0]
if not isinstance(assoc, str):
self.log.error('precedence associativity must be a string')
self.error = True
return
for term in p[1:]:
if not isinstance(term, str):
self.log.error('precedence items must be strings')
self.error = True
return
preclist.append((term, assoc, level+1))
self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
# Sort all of the actions by line number; make sure to stringify
# modules to make them sortable, since `line` may not uniquely sort all
# p functions
p_functions.sort(key=lambda p_function: (
p_function[0],
str(p_function[1]),
p_function[2],
p_function[3]))
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error('no rules of the form p_rulename are defined')
self.error = True
return
for line, module, name, doc in self.pfuncs:
file = inspect.getsourcefile(module)
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func.__code__.co_argcount > reqargs:
self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
self.error = True
elif func.__code__.co_argcount < reqargs:
self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
self.error = True
elif not func.__doc__:
self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
file, line, func.__name__)
else:
try:
parsed_g = parse_grammar(doc, file, line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError as e:
self.log.error(str(e))
self.error = True
# Looks like a valid grammar rule
# Mark the file in which defined.
self.modules.add(module)
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n, v in self.pdict.items():
if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
continue
if n.startswith('t_'):
continue
if n.startswith('p_') and n != 'p_error':
self.log.warning('%r not defined as a function', n)
if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
(isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
if v.__doc__:
try:
doc = v.__doc__.split(' ')
if doc[1] == ':':
self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
v.__code__.co_filename, v.__code__.co_firstlineno, n)
except IndexError:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(*, debug=yaccdebug, module=None, start=None,
check_recursion=True, optimize=False, debugfile=debug_file,
debuglog=None, errorlog=None):
# Reference to the parsing method of the last built parser
global parse
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
pdict = dict(_items)
# If no __file__ or __package__ attributes are available, try to obtain them
# from the __module__ instead
if '__file__' not in pdict:
pdict['__file__'] = sys.modules[pdict['__module__']].__file__
if '__package__' not in pdict and '__module__' in pdict:
if hasattr(sys.modules[pdict['__module__']], '__package__'):
pdict['__package__'] = sys.modules[pdict['__module__']].__package__
else:
pdict = get_caller_module_dict(2)
# Set start symbol if it's specified directly using an argument
if start is not None:
pdict['start'] = start
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict, log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError('Unable to build parser')
if debuglog is None:
if debug:
try:
debuglog = PlyLogger(open(debugfile, 'w'))
except IOError as e:
errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
debuglog = NullLogger()
else:
debuglog = NullLogger()
debuglog.info('Created by PLY (http://www.dabeaz.com/ply)')
errors = False
# Validate the parser information
if pinfo.validate_all():
raise YaccError('Unable to build parser')
if not pinfo.error_func:
errorlog.warning('no p_error() function is defined')
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term, assoc, level)
except GrammarError as e:
errorlog.warning('%s', e)
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname, syms, funcname, file, line)
except GrammarError as e:
errorlog.error('%s', e)
errors = True
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError as e:
errorlog.error(str(e))
errors = True
if errors:
raise YaccError('Unable to build parser')
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
errors = True
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info('')
debuglog.info('Unused terminals:')
debuglog.info('')
for term in unused_terminals:
errorlog.warning('Token %r defined, but not used', term)
debuglog.info(' %s', term)
# Print out all productions to the debug log
if debug:
debuglog.info('')
debuglog.info('Grammar')
debuglog.info('')
for n, p in enumerate(grammar.Productions):
debuglog.info('Rule %-5d %s', n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning('There is 1 unused token')
if len(unused_terminals) > 1:
errorlog.warning('There are %d unused tokens', len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning('There is 1 unused rule')
if len(unused_rules) > 1:
errorlog.warning('There are %d unused rules', len(unused_rules))
if debug:
debuglog.info('')
debuglog.info('Terminals, with rules where they appear')
debuglog.info('')
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
debuglog.info('')
debuglog.info('Nonterminals, with rules where they appear')
debuglog.info('')
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info('')
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning('Symbol %r is unreachable', u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error('Infinite recursion detected for symbol %r', inf)
errors = True
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
errors = True
if errors:
raise YaccError('Unable to build parser')
# Run the LRTable on the grammar
lr = LRTable(grammar, debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning('1 shift/reduce conflict')
elif num_sr > 1:
errorlog.warning('%d shift/reduce conflicts', num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning('1 reduce/reduce conflict')
elif num_rr > 1:
errorlog.warning('%d reduce/reduce conflicts', num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning('')
debuglog.warning('Conflicts:')
debuglog.warning('')
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
already_reported = set()
for state, rule, rejected in lr.rr_conflicts:
if (state, id(rule), id(rejected)) in already_reported:
continue
debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
debuglog.warning('rejected rule (%s) in state %d', rejected, state)
errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
errorlog.warning('rejected rule (%s) in state %d', rejected, state)
already_reported.add((state, id(rule), id(rejected)))
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning('Rule (%s) is never reduced', rejected)
errorlog.warning('Rule (%s) is never reduced', rejected)
warned_never.append(rejected)
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
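# -----------------------------------------------------------------------------
# Minimal usage sketch (hypothetical grammar and module names, shown only for
# orientation; it assumes a companion lex module whose NUMBER rule converts
# token values to int):
#
#       import ply.yacc as yacc
#       from calclex import tokens              # token list from the lex module
#
#       def p_expression_plus(p):
#           'expression : expression PLUS term'
#           p[0] = p[1] + p[3]
#
#       def p_expression_term(p):
#           'expression : term'
#           p[0] = p[1]
#
#       def p_term_number(p):
#           'term : NUMBER'
#           p[0] = p[1]
#
#       def p_error(p):
#           print('Syntax error at', p)
#
#       parser = yacc.yacc()                    # builds the LALR tables above
#       result = parser.parse('3 + 4')          # result == 7
# -----------------------------------------------------------------------------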
# ----------------------------------------------------------------------------
# End of source file: zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/ply/yacc.py
# (package: zxbasic)
# ----------------------------------------------------------------------------
# PLY package
# Author: David Beazley ([email protected])
# https://dabeaz.com/ply/index.html
__version__ = '4.0'
__all__ = ['lex','yacc']
# ----------------------------------------------------------------------------
# End of source file: zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/ply/__init__.py
# (package: zxbasic)
# ----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2020
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Latest version: https://github.com/dabeaz/ply
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import re
import sys
import types
import copy
import os
import inspect
# This tuple contains acceptable string types
StringTypes = (str, bytes)
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self, message, s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __repr__(self):
return f'LexToken({self.type},{self.value!r},{self.lineno},{self.lexpos})'
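# For example, a token matched by a hypothetical NUMBER rule on the first
# character of the input "3 + 4" would print as:  LexToken(NUMBER,'3',1,0)
# (type, value, line number, lexer position).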
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def critical(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
info = critical
debug = critical
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re, findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = 'INITIAL' # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexstateeoff = {} # Dictionary of eof functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lexeoff = None # EOF rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = '' # Ignored characters
self.lexliterals = '' # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
def clone(self, object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = {}
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object, f[0].__name__), f[1]))
newre.append((cre, newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = {}
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object, ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self, s):
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self, state):
if state not in self.lexstatere:
raise ValueError(f'Undefined state {state!r}')
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state, '')
self.lexerrorf = self.lexstateerrorf.get(state, None)
self.lexeoff = self.lexstateeoff.get(state, None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self, state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self, n):
self.lexpos += n
# ------------------------------------------------------------
# token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre, lexindexfunc in self.lexre:
m = lexre.match(lexdata, lexpos)
if not m:
continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func, tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
del tok.lexer
del self.lexmatch
                # Every rule function must return a token; if it returns nothing, we just move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = 'error'
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError(f"Scanning error. Illegal character {lexdata[lexpos]!r}",
lexdata[lexpos:])
lexpos = self.lexpos
if not newtok:
continue
return newtok
self.lexpos = lexpos
raise LexError(f"Illegal character {lexdata[lexpos]!r} at index {lexpos}",
lexdata[lexpos:])
if self.lexeoff:
tok = LexToken()
tok.type = 'eof'
tok.value = ''
tok.lineno = self.lineno
tok.lexpos = lexpos
tok.lexer = self
self.lexpos = lexpos
newtok = self.lexeoff(tok)
return newtok
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError('No input string given with input()')
return None
# Iterator interface
def __iter__(self):
return self
def __next__(self):
t = self.token()
if t is None:
raise StopIteration
return t
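# -----------------------------------------------------------------------------
# Typical driving loop (sketch; the lex() function defined later in this module
# builds the Lexer instance from the calling module's t_ rules):
#
#       lexer = lex.lex()
#       lexer.input('3 + 4 * 10')
#       for tok in lexer:                   # uses __iter__/__next__ above
#           print(tok.type, tok.value, tok.lineno, tok.lexpos)
# -----------------------------------------------------------------------------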
# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
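# -----------------------------------------------------------------------------
# Illustrative sketch of the two places a rule's regex can come from: t_NUMBER
# carries it in its docstring, while t_ID gets a .regex attribute from the
# @TOKEN decorator defined later in this module.  The identifier pattern is
# only an example.
#
#       def t_NUMBER(t):
#           r'\d+'
#           return t
#
#       identifier = r'[A-Za-z_][A-Za-z0-9_]*'
#
#       @TOKEN(identifier)
#       def t_ID(t):
#           return t
#
#       _get_regex(t_NUMBER)    # -> r'\d+'
#       _get_regex(t_ID)        # -> the identifier pattern
# -----------------------------------------------------------------------------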
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
return { **f.f_globals, **f.f_locals }
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
if not relist:
return [], [], []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
lexindexnames = lexindexfunc[:]
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find('ignore_') > 0:
lexindexfunc[i] = (None, None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre, lexindexfunc)], [regex], [lexindexnames]
except Exception:
m = (len(relist) // 2) + 1
llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
return (llist+rlist), (lre+rre), (lnames+rnames)
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_..." and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self, ldict, log=None, reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = {'INITIAL': 'inclusive'}
self.modules = set()
self.error = False
self.log = PlyLogger(sys.stderr) if log is None else log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get('tokens', None)
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error(f"Bad token name {n!r}")
self.error = True
if n in terminals:
self.log.warning(f"Token {n!r} multiply defined")
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get('literals', '')
if not self.literals:
self.literals = ''
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c, StringTypes) or len(c) > 1:
self.log.error(f'Invalid literal {c!r}. Must be a single character')
self.error = True
except TypeError:
self.log.error('Invalid literals specification. literals must be a sequence of characters')
self.error = True
def get_states(self):
self.states = self.ldict.get('states', None)
# Build statemap
if self.states:
if not isinstance(self.states, (tuple, list)):
self.log.error('states must be defined as a tuple or list')
self.error = True
else:
for s in self.states:
if not isinstance(s, tuple) or len(s) != 2:
self.log.error("Invalid state specifier %r. Must be a tuple (statename,'exclusive|inclusive')", s)
self.error = True
continue
name, statetype = s
if not isinstance(name, StringTypes):
self.log.error('State name %r must be a string', name)
self.error = True
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %r must be 'inclusive' or 'exclusive'", name)
self.error = True
continue
if name in self.stateinfo:
self.log.error("State %r already defined", name)
self.error = True
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_']
# Now build up a list of functions and a list of strings
self.toknames = {} # Mapping of symbols to token names
self.funcsym = {} # Symbols defined as functions
self.strsym = {} # Symbols defined as strings
self.ignore = {} # Ignore strings by state
self.errorf = {} # Error functions by state
self.eoff = {} # EOF functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error('No rules of the form t_rulename are defined')
self.error = True
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f, self.stateinfo)
self.toknames[f] = tokname
if hasattr(t, '__call__'):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'eof':
for s in states:
self.eoff[s] = t
elif tokname == 'ignore':
line = t.__code__.co_firstlineno
file = t.__code__.co_filename
self.log.error("%s:%d: Rule %r must be defined as a string", file, line, t.__name__)
self.error = True
else:
for s in states:
self.funcsym[s].append((f, t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if '\\' in t:
self.log.warning("%s contains a literal backslash '\\'", f)
elif tokname == 'error':
self.log.error("Rule %r must be defined as a function", f)
self.error = True
else:
for s in states:
self.strsym[s].append((f, t))
else:
self.log.error('%s not defined as a function or string', f)
self.error = True
# Sort the functions by line number
for f in self.funcsym.values():
f.sort(key=lambda x: x[1].__code__.co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
s.sort(key=lambda x: len(x[1]), reverse=True)
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule %r has too many arguments", file, line, f.__name__)
self.error = True
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule %r requires an argument", file, line, f.__name__)
self.error = True
continue
if not _get_regex(f):
self.log.error("%s:%d: No regular expression defined for rule %r", file, line, f.__name__)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
if c.match(''):
self.log.error("%s:%d: Regular expression for rule %r matches empty string", file, line, f.__name__)
self.error = True
except re.error as e:
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
if '#' in _get_regex(f):
self.log.error("%s:%d. Make sure '#' in rule %r is escaped with '\\#'", file, line, f.__name__)
self.error = True
# Validate all rules defined by strings
for name, r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule %r must be defined as a function", name)
self.error = True
continue
if tokname not in self.tokens and tokname.find('ignore_') < 0:
self.log.error("Rule %r defined for an unspecified token %s", name, tokname)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
if (c.match('')):
self.log.error("Regular expression for rule %r matches empty string", name)
self.error = True
except re.error as e:
self.log.error("Invalid regular expression for rule %r. %s", name, e)
if '#' in r:
self.log.error("Make sure '#' in rule %r is escaped with '\\#'", name)
self.error = True
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state %r", state)
self.error = True
# Validate the error function
efunc = self.errorf.get(state, None)
if efunc:
f = efunc
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule %r has too many arguments", file, line, f.__name__)
self.error = True
if nargs < reqargs:
self.log.error("%s:%d: Rule %r requires an argument", file, line, f.__name__)
self.error = True
for module in self.modules:
self.validate_module(module)
# -----------------------------------------------------------------------------
# validate_module()
#
# This checks to see if there are duplicated t_rulename() functions or strings
    # in the lexer input file. This is done using a simple regular expression
# match on each line in the source code of the given module.
# -----------------------------------------------------------------------------
def validate_module(self, module):
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
return
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = {}
linen += 1
for line in lines:
m = fre.match(line)
if not m:
m = sre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
self.error = True
linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(*, module=None, object=None, debug=False,
reflags=int(re.VERBOSE), debuglog=None, errorlog=None):
global lexer
ldict = None
stateinfo = {'INITIAL': 'inclusive'}
lexobj = Lexer()
global token, input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object:
module = object
    # Get the module dictionary used for the lexer
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
ldict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in ldict:
ldict['__file__'] = sys.modules[ldict['__module__']].__file__
else:
ldict = get_caller_module_dict(2)
    # Collect lexer information from the dictionary
linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
linfo.get_all()
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
# Dump some basic debugging information
if debug:
debuglog.info('lex: tokens = %r', linfo.tokens)
debuglog.info('lex: literals = %r', linfo.literals)
debuglog.info('lex: states = %r', linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = set()
for n in linfo.tokens:
lexobj.lextokens.add(n)
# Get literals specification
if isinstance(linfo.literals, (list, tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = {}
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
# Now add all of the simple rules
for name, r in linfo.strsym[state]:
regex_list.append('(?P<%s>%s)' % (name, r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i, text in enumerate(re_text):
debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state, stype in stateinfo.items():
if state != 'INITIAL' and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere['INITIAL']
lexobj.lexretext = lexobj.lexstateretext['INITIAL']
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
if not lexobj.lexerrorf:
errorlog.warning('No t_error rule is defined')
# Set up eof functions
lexobj.lexstateeoff = linfo.eoff
lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
# Check state information for ignore and error rules
for s, stype in stateinfo.items():
if stype == 'exclusive':
if s not in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state %r", s)
if s not in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state %r", s)
elif stype == 'inclusive':
if s not in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
if s not in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
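# Illustrative sketch (not part of the original source): a minimal lexer module
# built with lex(), assuming this module is importable as ply.lex.  All names
# below (tokens, rules, the sample input) are hypothetical.
#
#     import ply.lex as lex
#
#     tokens = ('NUMBER', 'PLUS')
#     t_PLUS = r'\+'
#     t_ignore = ' \t'
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):
#         print('Illegal character %r' % t.value[0])
#         t.lexer.skip(1)
#
#     lexer = lex.lex()            # reflection happens via LexerReflect above
#     lexer.input('1 + 2')
#     while True:
#         tok = lexer.token()
#         if not tok:
#             break
#         print(tok.type, tok.value)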
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
if not data:
try:
filename = sys.argv[1]
with open(filename) as f:
data = f.read()
except IndexError:
sys.stdout.write('Reading from standard input (type EOF to end):\n')
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while True:
tok = _token()
if not tok:
break
sys.stdout.write(f'({tok.type},{tok.value!r},{tok.lineno},{tok.lexpos})\n')
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator attaches a regular expression to a rule function, for use when
# the regex cannot (or should not) be supplied via the function's docstring
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_regex(f):
if hasattr(r, '__call__'):
f.regex = _get_regex(r)
else:
f.regex = r
return f
return set_regex
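# Illustrative sketch (not part of the original source): @TOKEN attaches the
# regex to a rule function when a docstring is inconvenient, e.g. when the
# pattern is built from other fragments.  Names below are hypothetical.
#
#     identifier = r'[A-Za-z_][A-Za-z0-9_]*'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         return t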
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/ply/lex.py
|
lex.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from .arrayaccess import SymbolARRAYACCESS
class SymbolARRAYLOAD(SymbolARRAYACCESS):
""" This class is the same as SymbolARRAYACCESS, we just declare it to make
a distinction. (e.g. the Token is gotten from the class name)
"""
pass
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/arrayload.py
|
arrayload.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
import numbers
from api.constants import CLASS
from .symbol_ import Symbol
from .type_ import SymbolTYPE
from .type_ import Type as TYPE
from .const import SymbolCONST
def _get_val(other):
""" Given a Number, a Numeric Constant or a python number return its value
"""
assert isinstance(other, (numbers.Number, SymbolNUMBER, SymbolCONST))
if isinstance(other, SymbolNUMBER):
return other.value
if isinstance(other, SymbolCONST):
return other.expr.value
return other
class SymbolNUMBER(Symbol):
""" Defines an NUMBER symbol.
"""
def __init__(self, value, lineno, type_=None):
assert lineno is not None
assert type_ is None or isinstance(type_, SymbolTYPE)
if isinstance(value, SymbolNUMBER):
value = value.value
assert isinstance(value, numbers.Number)
super(SymbolNUMBER, self).__init__()
self.class_ = CLASS.const
if int(value) == value:
value = int(value)
else:
value = float(value)
self.value = value
if type_ is not None:
self.type_ = type_
elif isinstance(value, float):
if -32768.0 < value < 32767:
self.type_ = TYPE.fixed
else:
self.type_ = TYPE.float_
elif isinstance(value, int):
if 0 <= value < 256:
self.type_ = TYPE.ubyte
elif -128 <= value < 128:
self.type_ = TYPE.byte_
elif 0 <= value < 65536:
self.type_ = TYPE.uinteger
elif -32768 <= value < 32768:
self.type_ = TYPE.integer
elif value < 0:
self.type_ = TYPE.long_
else:
self.type_ = TYPE.ulong
self.lineno = lineno
def __str__(self):
return str(self.value)
def __repr__(self):
return "%s:%s" % (self.type_, str(self))
def __hash__(self):
return id(self)
@property
def t(self):
return str(self)
def __eq__(self, other):
if not isinstance(other, (numbers.Number, SymbolNUMBER, SymbolCONST)):
return False
return self.value == _get_val(other)
def __lt__(self, other):
return self.value < _get_val(other)
def __le__(self, other):
return self.value <= _get_val(other)
def __gt__(self, other):
return self.value > _get_val(other)
def __ge__(self, other):
return self.value >= _get_val(other)
def __add__(self, other):
return SymbolNUMBER(self.value + _get_val(other), self.lineno)
def __radd__(self, other):
return SymbolNUMBER(_get_val(other) + self.value, self.lineno)
def __sub__(self, other):
return SymbolNUMBER(self.value - _get_val(other), self.lineno)
def __rsub__(self, other):
return SymbolNUMBER(_get_val(other) - self.value, self.lineno)
def __mul__(self, other):
return SymbolNUMBER(self.value * _get_val(other), self.lineno)
def __rmul__(self, other):
return SymbolNUMBER(_get_val(other) * self.value, self.lineno)
def __truediv__(self, other):
return SymbolNUMBER(self.value / _get_val(other), self.lineno)
def __div__(self, other):
return self.__truediv__(other)
def __rtruediv__(self, other):
return SymbolNUMBER(_get_val(other) / self.value, self.lineno)
def __rdiv__(self, other):
return self.__rtruediv__(other)
def __or__(self, other):
return SymbolNUMBER(self.value | _get_val(other), self.lineno)
def __ror__(self, other):
        return SymbolNUMBER(_get_val(other) | self.value, self.lineno)
def __and__(self, other):
return SymbolNUMBER(self.value & _get_val(other), self.lineno)
def __rand__(self, other):
return SymbolNUMBER(_get_val(other) & self.value, self.lineno)
def __mod__(self, other):
return SymbolNUMBER(self.value % _get_val(other), self.lineno)
def __rmod__(self, other):
return SymbolNUMBER(_get_val(other) % self.value, self.lineno)
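# Illustrative sketch (not part of the original source): how the constructor
# infers a type from the value and how arithmetic folds into a new SymbolNUMBER.
# Assumes the surrounding zxbasic package is importable.
#
#     n = SymbolNUMBER(3, lineno=1)        # 0 <= 3 < 256        -> TYPE.ubyte
#     m = SymbolNUMBER(40000, lineno=1)    # 0 <= 40000 < 65536  -> TYPE.uinteger
#     f = SymbolNUMBER(1.5, lineno=1)      # non-integral, small -> TYPE.fixed
#     (n + m).value                        # -> 40003 (folded into a new SymbolNUMBER)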
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/number.py
|
number.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from .call import SymbolCALL
class SymbolFUNCCALL(SymbolCALL):
""" This class is the same as CALL, we just declare it to make
a distinction. (e.g. the Token is gotten from the class name)
A FunctionCall is a Call whose return value is going to be used
later within an expression. Eg.
CALL: f(x)
FUNCCALL: a = 2 * f(x)
This distinction is useful to discard values returned using the HEAP
to avoid memory leaks.
"""
pass
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/funccall.py
|
funccall.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from api import global_
import api.symboltable
from .symbol_ import Symbol
from .function import SymbolFUNCTION
class SymbolFUNCDECL(Symbol):
""" Defines a Function declaration
"""
def __init__(self, entry, lineno):
super(SymbolFUNCDECL, self).__init__()
self.entry = entry # Symbol table entry
self.lineno = lineno # Line of this function declaration
@property
def entry(self):
return self.children[0]
@entry.setter
def entry(self, value):
assert isinstance(value, SymbolFUNCTION)
self.children = [value]
@property
def name(self):
return self.entry.name
@property
def locals_size(self):
return self.entry.locals_size
@locals_size.setter
def locals_size(self, value):
self.entry.locals_size = value
@property
def local_symbol_table(self):
return self.entry.local_symbol_table
@local_symbol_table.setter
def local_symbol_table(self, value):
assert isinstance(value, api.symboltable.SymbolTable.Scope)
self.entry.local_symbol_table = value
@property
def type_(self):
return self.entry.type_
@type_.setter
def type_(self, value):
self.entry.type_ = value
@property
def size(self):
return self.type_.size
@property
def mangled(self):
return self.entry.mangled
@classmethod
def make_node(cls, func_name, lineno, type_=None):
""" This will return a node with the symbol as a function.
"""
entry = global_.SYMBOL_TABLE.declare_func(func_name, lineno, type_=type_)
if entry is None:
return None
entry.declared = True
return cls(entry, lineno)
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/funcdecl.py
|
funcdecl.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from .symbol_ import Symbol
from .number import SymbolNUMBER as NUMBER
from .typecast import SymbolTYPECAST as TYPECAST
from .binary import SymbolBINARY as BINARY
from .string_ import SymbolSTRING as STRING
from .type_ import Type
import api.config
from api.check import check_type
from api.check import is_number
import api.global_ as gl
class SymbolSTRSLICE(Symbol):
""" Defines a string slice
"""
def __init__(self, string, lower, upper, lineno):
super(SymbolSTRSLICE, self).__init__(string, lower, upper)
        self.string = string  # The setter ensures this is a STRING
self.lower = lower
self.upper = upper
self.lineno = lineno
self.type_ = Type.string
@property
def string(self):
return self.children[0]
@string.setter
def string(self, value):
assert isinstance(value, Symbol)
assert value.type_ == Type.string
self.children[0] = value
@property
def lower(self):
return self.children[1]
@lower.setter
def lower(self, value):
assert isinstance(value, Symbol)
assert value.type_ == gl.SYMBOL_TABLE.basic_types[gl.STR_INDEX_TYPE]
self.children[1] = value # TODO: typecast it to UINTEGER ??
@property
def upper(self):
return self.children[2]
@upper.setter
def upper(self, value):
assert isinstance(value, Symbol)
assert value.type_ == gl.SYMBOL_TABLE.basic_types[gl.STR_INDEX_TYPE]
self.children[2] = value
@classmethod
def make_node(cls, lineno, s, lower, upper):
""" Creates a node for a string slice. S is the string expression Tree.
        Lower and upper are the bounds. If lower and upper are constants
        and s is also a constant, then a string constant is returned.
If lower > upper, an empty string is returned.
"""
if lower is None or upper is None or s is None:
return None
if not check_type(lineno, Type.string, s):
return None
lo = up = None
base = NUMBER(api.config.OPTIONS.string_base.value, lineno=lineno)
lower = TYPECAST.make_node(gl.SYMBOL_TABLE.basic_types[gl.STR_INDEX_TYPE],
BINARY.make_node('MINUS', lower, base, lineno=lineno,
func=lambda x, y: x - y), lineno)
upper = TYPECAST.make_node(gl.SYMBOL_TABLE.basic_types[gl.STR_INDEX_TYPE],
BINARY.make_node('MINUS', upper, base, lineno=lineno,
func=lambda x, y: x - y), lineno)
if lower is None or upper is None:
return None
if is_number(lower):
lo = lower.value
if lo < gl.MIN_STRSLICE_IDX:
lower.value = lo = gl.MIN_STRSLICE_IDX
if is_number(upper):
up = upper.value
if up > gl.MAX_STRSLICE_IDX:
upper.value = up = gl.MAX_STRSLICE_IDX
if is_number(lower, upper):
if lo > up:
return STRING('', lineno)
if s.token == 'STRING': # A constant string? Recalculate it now
up += 1
st = s.value.ljust(up) # Procrustean filled (right)
return STRING(st[lo:up], lineno)
# a$(0 TO INF.) = a$
if lo == gl.MIN_STRSLICE_IDX and up == gl.MAX_STRSLICE_IDX:
return s
return cls(s, lower, upper, lineno)
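# Illustrative sketch (not part of the original source): with a constant string
# and constant bounds the slice is folded at compile time.  Assuming a string
# base of 1 (i.e. OPTIONS.string_base == 1) and a$ = "HELLO":
#
#     a$(2 TO 4)  ->  "ELL"   (bounds are rebased by subtracting the base)
#     a$(4 TO 2)  ->  ""      (lower > upper yields an empty string)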
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/strslice.py
|
strslice.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from .symbol_ import Symbol
class SymbolARRAYDECL(Symbol):
""" Defines an Array declaration
"""
def __init__(self, entry):
super(SymbolARRAYDECL, self).__init__(entry)
@property
def name(self):
return self.entry.name
@property
def mangled(self):
return self.entry.mangled
@property
def entry(self):
return self.children[0]
@property
def type_(self):
return self.entry.type_
@property
def size(self):
""" Total memory size of array cells
"""
return self.type_.size * self.count
@property
def count(self):
""" Total number of array cells
"""
return self.entry.count
@property
def memsize(self):
""" Total array cell + indexes size
"""
return self.entry.memsize
@property
def bounds(self):
return self.entry.bounds
def __str__(self):
return "%s(%s)" % (self.entry.name, self.bounds)
def __repr__(self):
return str(self)
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/arraydecl.py
|
arraydecl.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from .symbol_ import Symbol
from .const import SymbolCONST
from .number import SymbolNUMBER
from .string_ import SymbolSTRING
from .typecast import SymbolTYPECAST
from .type_ import Type as TYPE
from api.check import common_type
from api.check import is_const
from api.check import is_number
from api.check import is_static
from api.check import is_numeric
from api.check import is_string
from api.errmsg import error
class SymbolBINARY(Symbol):
""" Defines a BINARY EXPRESSION e.g. (a + b)
Only the operator (e.g. 'PLUS') is stored.
"""
def __init__(self, operator, left, right, lineno, type_=None, func=None):
Symbol.__init__(self, left, right)
self.lineno = lineno
self.operator = operator
self.type_ = type_ if type_ is not None else common_type(left, right)
self.func = func # Will be used for constant folding at later stages if not None
@property
def left(self):
return self.children[0]
@left.setter
def left(self, value):
assert isinstance(value, Symbol)
self.children[0] = value
@property
def right(self):
return self.children[1]
@right.setter
def right(self, value):
assert isinstance(value, Symbol)
self.children[1] = value
def __str__(self):
return '%s %s %s' % (self.left, self.operator, self.right)
def __repr__(self):
return '(%s: %s %s)' % (self.operator, self.left, self.right)
@property
def size(self):
return self.type_.size
@classmethod
def make_node(cls, operator, left, right, lineno, func=None,
type_=None):
""" Creates a binary node for a binary operation,
e.g. A + 6 => '+' (A, 6) in prefix notation.
Parameters:
-operator: the binary operation token. e.g. 'PLUS' for A + 6
-left: left operand
-right: right operand
-func: is a lambda function used when constant folding is applied
-type_: resulting type (to enforce it).
If no type_ is specified the resulting one will be guessed.
"""
if left is None or right is None:
return None
a, b = left, right # short form names
# Check for constant non-numeric operations
c_type = common_type(a, b) # Resulting operation type or None
if c_type: # there must be a common type for a and b
if is_numeric(a, b) and (is_const(a) or is_number(a)) and \
(is_const(b) or is_number(b)):
if func is not None:
a = SymbolTYPECAST.make_node(c_type, a, lineno) # ensure type
b = SymbolTYPECAST.make_node(c_type, b, lineno) # ensure type
return SymbolNUMBER(func(a.value, b.value), type_=type_, lineno=lineno)
if is_static(a, b):
a = SymbolTYPECAST.make_node(c_type, a, lineno) # ensure type
b = SymbolTYPECAST.make_node(c_type, b, lineno) # ensure type
return SymbolCONST(cls(operator, a, b, lineno, type_=type_, func=func), lineno=lineno)
if operator in ('BNOT', 'BAND', 'BOR', 'BXOR', 'NOT', 'AND', 'OR',
'XOR', 'MINUS', 'MULT', 'DIV', 'SHL', 'SHR') and \
not is_numeric(a, b):
error(lineno, 'Operator %s cannot be used with STRINGS' % operator)
return None
if is_string(a, b) and func is not None: # Are they STRING Constants?
if operator == 'PLUS':
return SymbolSTRING(func(a.value, b.value), lineno)
return SymbolNUMBER(int(func(a.text, b.text)), type_=TYPE.ubyte,
lineno=lineno) # Convert to u8 (boolean)
if operator in ('BNOT', 'BAND', 'BOR', 'BXOR'):
if TYPE.is_decimal(c_type):
c_type = TYPE.long_
if a.type_ != b.type_ and TYPE.string in (a.type_, b.type_):
            c_type = a.type_  # Will give an error based on the first operand
if operator not in ('SHR', 'SHL'):
a = SymbolTYPECAST.make_node(c_type, a, lineno)
b = SymbolTYPECAST.make_node(c_type, b, lineno)
if a is None or b is None:
return None
if type_ is None:
if operator in ('LT', 'GT', 'EQ', 'LE', 'GE', 'NE', 'AND', 'OR',
'XOR', 'NOT'):
type_ = TYPE.ubyte # Boolean type
else:
type_ = c_type
return cls(operator, a, b, type_=type_, lineno=lineno)
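# Illustrative sketch (not part of the original source): constant folding in
# make_node.  When both operands are numeric constants and a folding function
# is supplied, the whole expression collapses into a single SymbolNUMBER.
#
#     two   = SymbolNUMBER(2, lineno=1)
#     three = SymbolNUMBER(3, lineno=1)
#     node  = SymbolBINARY.make_node('PLUS', two, three, lineno=1,
#                                    func=lambda x, y: x + y)
#     node.value   # -> 5 (no BINARY node is actually built)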
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/binary.py
|
binary.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
import api.global_ as gl
from .symbol_ import Symbol
# ----------------------------------------------------------------------
# CONST Symbol object
# ----------------------------------------------------------------------
class SymbolCONST(Symbol):
""" Defines a constant expression (not numerical, e.g. a Label or a @label,
or a more complex one like @label + 5)
"""
def __init__(self, expr, lineno):
super(SymbolCONST, self).__init__(expr)
self.lineno = lineno
self._t = gl.optemps.new_t()
@property
def expr(self):
return self.children[0]
@expr.setter
def expr(self, value):
assert isinstance(value, Symbol)
self.children = [value]
@property
def type_(self):
return self.expr.type_
def __str__(self):
return str(self.expr)
def __repr__(self):
return str(self)
@property
def t(self):
return self._t
@t.setter
def t(self, value):
self._t = value
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/const.py
|
const.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from .symbol_ import Symbol
from .argument import SymbolARGUMENT
class SymbolARGLIST(Symbol):
""" Defines a list of arguments in a function call or array access
"""
@property
def args(self):
return self.children
@args.setter
def args(self, value):
for i in value:
            assert isinstance(i, SymbolARGUMENT)
self.appendChild(i)
def __getitem__(self, range_):
return self.args[range_]
def __setitem__(self, range_, value):
assert isinstance(value, SymbolARGUMENT)
self.children[range_] = value
def __str__(self):
return '(%s)' % (', '.join(str(x) for x in self.args))
def __repr__(self):
return str(self)
def __len__(self):
return len(self.args)
@classmethod
def make_node(cls, node, *args):
""" This will return a node with an argument_list.
"""
if node is None:
node = cls()
assert isinstance(node, SymbolARGUMENT) or isinstance(node, cls)
if not isinstance(node, cls):
return cls.make_node(None, node, *args)
for arg in args:
assert isinstance(arg, SymbolARGUMENT)
node.appendChild(arg)
return node
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/arglist.py
|
arglist.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from .symbol_ import Symbol
from .number import SymbolNUMBER
from .type_ import SymbolTYPE
from api.check import is_number
from api.check import is_string
class SymbolBUILTIN(Symbol):
""" Defines an BUILTIN function e.g. INKEY$(), RND() or LEN
"""
def __init__(self, lineno, fname, type_=None, *operands):
assert isinstance(lineno, int)
assert type_ is None or isinstance(type_, SymbolTYPE)
super(SymbolBUILTIN, self).__init__(*operands)
self.lineno = lineno
self.fname = fname
self.type_ = type_
@property
def type_(self):
if self._type is not None:
return self._type
return self.operand.type_
@type_.setter
def type_(self, value):
assert value is None or isinstance(value, SymbolTYPE)
self._type = value
@property
def operand(self):
return self.children[0] if self.children else None
@operand.setter
def operand(self, value):
assert isinstance(value, Symbol)
self.children[0] = value
@property
def operands(self):
return self.children
@operands.setter
def operands(self, value):
for x in value:
assert isinstance(x, Symbol)
self.children = [x for x in value]
@property
def size(self):
""" sizeof(type)
"""
if self.type_ is None:
return 0
return self.type_.size
@classmethod
def make_node(cls, lineno, fname, func=None, type_=None, *operands):
""" Creates a node for a unary operation. E.g. -x or LEN(a$)
Parameters:
-func: function used on constant folding when possible
-type_: the resulting type (by default, the same as the argument).
For example, for LEN (str$), result type is 'u16'
and arg type is 'string'
"""
if func is not None and len(operands) == 1: # Try constant-folding
if is_number(operands[0]) or is_string(operands[0]): # e.g. ABS(-5)
return SymbolNUMBER(func(operands[0].value), type_=type_, lineno=lineno)
return cls(lineno, fname, type_, *operands)
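# Illustrative sketch (not part of the original source): when the single
# operand is a constant and a folding function is supplied, make_node returns
# a folded SymbolNUMBER instead of a BUILTIN node.
#
#     arg  = SymbolNUMBER(-5, lineno=1)
#     node = SymbolBUILTIN.make_node(1, 'ABS', abs, None, arg)
#     node.value   # -> 5 (a SymbolNUMBER, constant-folded)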
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/builtin.py
|
builtin.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from .symbol_ import Symbol
from .type_ import SymbolTYPE
from .type_ import Type as TYPE
from .number import SymbolNUMBER
from .vararray import SymbolVARARRAY
from api.errmsg import error
from api import errmsg
from api.check import is_number
from api.check import is_CONST
from api.check import is_const
class SymbolTYPECAST(Symbol):
""" Defines a typecast operation.
"""
def __init__(self, new_type, operand, lineno):
assert isinstance(new_type, SymbolTYPE)
super(SymbolTYPECAST, self).__init__(operand)
self.lineno = lineno
self.type_ = new_type
# The converted (typecast) node
@property
def operand(self):
return self.children[0]
@operand.setter
def operand(self, operand_):
assert isinstance(operand_, Symbol)
self.children[0] = operand_
@classmethod
def make_node(cls, new_type, node, lineno):
""" Creates a node containing the type cast of
        the given one. If new_type == node.type_, then
nothing is done, and the same node is
returned.
Returns None on failure (and calls syntax_error)
"""
assert isinstance(new_type, SymbolTYPE)
# None (null) means the given AST node is empty (usually an error)
if node is None:
return None # Do nothing. Return None
assert isinstance(node, Symbol), '<%s> is not a Symbol' % node
# The source and dest types are the same
if new_type == node.type_:
return node # Do nothing. Return as is
# TODO: Create a base scalar type
if isinstance(node, SymbolVARARRAY):
if new_type.size == node.type_.size and TYPE.string not in (new_type, node.type_):
return node
error(lineno, "Array {} type does not match parameter type".format(node.name))
return None
STRTYPE = TYPE.string
# Typecasting, at the moment, only for number
if node.type_ == STRTYPE:
error(lineno, 'Cannot convert string to a value. Use VAL() function')
return None
        # Converting a number to a string must be done with the STR() function
if new_type == STRTYPE:
error(lineno, 'Cannot convert value to string. Use STR() function')
return None
# If the given operand is a constant, perform a static typecast
if is_CONST(node):
node.expr = cls(new_type, node.expr, lineno)
return node
if not is_number(node) and not is_const(node):
return cls(new_type, node, lineno)
# It's a number. So let's convert it directly
if is_const(node):
node = SymbolNUMBER(node.value, node.lineno, node.type_)
if new_type.is_basic and not TYPE.is_integral(new_type): # not an integer
node.value = float(node.value)
else: # It's an integer
new_val = (int(node.value) & ((1 << (8 * new_type.size)) - 1)) # Mask it
if node.value >= 0 and node.value != new_val:
errmsg.warning_conversion_lose_digits(node.lineno)
node.value = new_val
elif node.value < 0 and (1 << (new_type.size * 8)) + \
node.value != new_val: # Test for positive to negative coercion
errmsg.warning_conversion_lose_digits(node.lineno)
node.value = new_val - (1 << (new_type.size * 8))
node.type_ = new_type
return node
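# Illustrative sketch (not part of the original source): casting a constant to
# a narrower integer type masks the value and warns about lost digits.
#
#     n = SymbolNUMBER(300, lineno=1)                  # fits in uinteger
#     SymbolTYPECAST.make_node(TYPE.ubyte, n, lineno=1)
#     n.value   # -> 44 (300 & 0xFF), after a "conversion may lose digits" warning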
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/typecast.py
|
typecast.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from .symbol_ import Symbol
class SymbolASM(Symbol):
""" Defines an ASM sentence
"""
def __init__(self, asm, lineno):
super(SymbolASM, self).__init__()
self.asm = asm
self.lineno = lineno
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/asm.py
|
asm.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from api.constants import CLASS
from .var import SymbolVAR
from .symbol_ import Symbol
class SymbolLABEL(SymbolVAR):
prefix = '__LABEL__'
def __init__(self, name, lineno):
super(SymbolLABEL, self).__init__(name, lineno)
self.class_ = CLASS.label
self._scope_owner = [] # list of nested functions containing this label (scope)
@property
def t(self):
""" t property is constant for labels
"""
return self.prefix + self.name
@property
def accessed(self):
return self._accessed
@accessed.setter
def accessed(self, value):
self._accessed = bool(value)
if self._accessed:
for entry in self.scope_owner:
entry.accessed = True
@property
def scope_owner(self):
return list(self._scope_owner)
@scope_owner.setter
def scope_owner(self, entries):
assert all(isinstance(x, Symbol) for x in entries)
self._scope_owner = list(entries)
self.accessed = self._accessed # if true, refresh scope_owners
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/label.py
|
label.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
import api.global_ as gl
from api.check import check_call_arguments
from api.constants import CLASS
import api.errmsg as errmsg
from .symbol_ import Symbol
from .function import SymbolFUNCTION
from .arglist import SymbolARGLIST
from .argument import SymbolARGUMENT
from .var import SymbolVAR
from .type_ import Type
class SymbolCALL(Symbol):
""" Defines function / procedure call. E.g. F(1, A + 2)
It contains the symbol table entry of the called function (e.g. F)
And a list of arguments. (e.g. (1, A + 2) in this example).
Parameters:
id_: The symbol table entry
arglist: a SymbolARGLIST instance
lineno: source code line where this call was made
"""
def __init__(self, entry: SymbolFUNCTION, arglist, lineno):
super(SymbolCALL, self).__init__()
assert isinstance(lineno, int)
assert all(isinstance(x, SymbolARGUMENT) for x in arglist)
self.entry = entry
self.args = arglist # Func. call / array access
self.lineno = lineno
if entry.token == 'FUNCTION':
for arg, param in zip(arglist, entry.params): # Sets dependency graph for each argument -> parameter
if arg.value is not None:
arg.value.add_required_symbol(param)
@property
def entry(self):
return self.children[0]
@entry.setter
def entry(self, value):
assert isinstance(value, SymbolFUNCTION)
if self.children is None or not self.children:
self.children = [value]
else:
self.children[0] = value
@property
def args(self):
return self.children[1]
@args.setter
def args(self, value):
assert isinstance(value, SymbolARGLIST)
if self.children is None or not self.children:
self.children = [None]
if len(self.children) < 2:
self.children.append(value)
return
self.children[1] = value
@property
def type_(self):
return self.entry.type_
@classmethod
def make_node(cls, id_, params, lineno):
""" This will return an AST node for a function/procedure call.
"""
assert isinstance(params, SymbolARGLIST)
entry = gl.SYMBOL_TABLE.access_func(id_, lineno)
if entry is None: # A syntax / semantic error
return None
if entry.callable is False: # Is it NOT callable?
if entry.type_ != Type.string:
errmsg.syntax_error_not_array_nor_func(lineno, id_)
return None
gl.SYMBOL_TABLE.check_class(id_, CLASS.function, lineno)
entry.accessed = True
if entry.declared and not entry.forwarded:
check_call_arguments(lineno, id_, params)
        else:  # All functions go to the global scope by default
if not isinstance(entry, SymbolFUNCTION):
entry = SymbolVAR.to_function(entry, lineno)
gl.SYMBOL_TABLE.move_to_global_scope(id_)
gl.FUNCTION_CALLS.append((id_, params, lineno,))
return cls(entry, params, lineno)
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/call.py
|
call.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from api.constants import TYPE
from api.constants import CLASS
from api.config import OPTIONS
from api.decorator import classproperty
from .symbol_ import Symbol
class SymbolTYPE(Symbol):
""" A Type definition. Defines a type,
    either user-defined or basic.
"""
def __init__(self, name, lineno, *children):
# All children (if any) must be SymbolTYPE
assert all(isinstance(x, SymbolTYPE) for x in children)
super(SymbolTYPE, self).__init__(*children)
self.name = name # typename
self.lineno = lineno # The line the type was defined. Line 0 = basic type
self.final = self # self.final always return the original aliased type (if this type is an alias)
self.caseins = OPTIONS.case_insensitive.value # Whether this ID is case insensitive or not
self.class_ = CLASS.type_
self.accessed = False # Whether this type has been used or not
def __repr__(self):
return "%s(%s)" % (self.token, str(self))
def __str__(self):
return self.name
@property
def size(self):
return sum(x.size for x in self.children)
@property
def is_basic(self):
""" Whether this is a basic (canonical) type or not.
"""
if len(self.children) == 1:
return self.children[0].is_basic
return False
@property
def is_signed(self):
if self is not self.final:
return self.final.is_signed
if len(self.children) != 1:
return False
return self.children[0].is_signed
@property
def is_dynamic(self):
""" True if this type uses dynamic (Heap) memory.
e.g. strings or dynamic arrays
"""
if self is not self.final:
return self.final.is_dynamic
return any([x.is_dynamic for x in self.children])
@property
def is_alias(self):
""" Whether this is an alias of another type or not.
"""
return False
def __eq__(self, other):
assert isinstance(other, SymbolTYPE)
if self is not self.final:
return self.final == other
other = other.final # remove alias
if other.is_basic:
return other == self
if len(self.children) != len(other.children):
if len(self.children) == 1 and not other.children:
return self.children[0] == other
if len(other.children) == 1 and not self.children:
return other.children[0] == self
return False
return all(i == j for i, j in zip(self.children, other.children))
def __ne__(self, other):
assert isinstance(other, SymbolTYPE)
return not (self == other)
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
if self is not self.final:
return bool(self.final)
return any(x for x in self.children)
class SymbolBASICTYPE(SymbolTYPE):
""" Defines a basic type (Ubyte, Byte, etc..)
Basic (default) types are defined upon start and are case insensitive.
    If name is None or '', the default typename from TYPE.to_string will be used.
"""
def __init__(self, type_, name=None):
""" type_ = Internal representation (e.g. TYPE.ubyte)
"""
assert TYPE.is_valid(type_)
if not name:
name = TYPE.to_string(type_)
super(SymbolBASICTYPE, self).__init__(name, 0)
self.type_ = type_
@property
def size(self):
return TYPE.size(self.type_)
@property
def is_basic(self):
""" Whether this is a basic (canonical) type or not.
"""
return True
@property
def is_signed(self):
return TYPE.is_signed(self.type_)
def to_signed(self):
""" Returns another instance with the signed equivalent
of this type.
"""
return SymbolBASICTYPE(TYPE.to_signed(self.type_))
@property
def is_dynamic(self):
return self.type_ == TYPE.string
def __hash__(self):
return self.type_
def __eq__(self, other):
if self is not self.final:
return self.final == other
other = other.final # remove alias
if other.is_basic: # for both basic types, just compare
return self.type_ == other.type_
assert other.children # must be not empty
if len(other.children) > 1: # Different size
return False
return self == other.children[0]
def __bool__(self):
return self.type_ != TYPE.unknown
class SymbolTYPEALIAS(SymbolTYPE):
""" Defines a type which is alias of another
"""
def __init__(self, name, lineno, alias):
assert isinstance(alias, SymbolTYPE)
super(SymbolTYPEALIAS, self).__init__(name, lineno, alias)
self.final = alias.final
@property
def is_alias(self):
""" Whether this is an alias of another type or not.
"""
return True
@property
def size(self):
return self.final.size
@property
def is_basic(self):
return self.final.is_basic
@property
def alias(self):
return self.children[0]
@property
def to_signed(self):
assert self.is_basic
return self.final.to_signed()
class SymbolTYPEREF(SymbolTYPEALIAS):
""" Creates a Type reference or usage.
Eg. DIM a As Integer
In this case, the Integer type is accessed.
It's an alias type, containing just the
    original Type definition (SymbolTYPE), the lineno at which it is
    currently being accessed,
and if it was implicitly inferred or explicitly declared.
"""
def __init__(self, type_, lineno, implicit=False):
assert isinstance(type_, SymbolTYPE)
super(SymbolTYPEREF, self).__init__(type_.name, lineno, type_)
self.implicit = implicit
def to_signed(self):
assert self.is_basic
return self.final.to_signed()
@property
def type_(self):
assert self.is_basic
return self.final.type_
class Type(object):
""" Class for enumerating Basic Types.
e.g. Type.string.
"""
unknown = auto = SymbolBASICTYPE(TYPE.unknown)
ubyte = SymbolBASICTYPE(TYPE.ubyte)
byte_ = SymbolBASICTYPE(TYPE.byte_)
uinteger = SymbolBASICTYPE(TYPE.uinteger)
long_ = SymbolBASICTYPE(TYPE.long_)
ulong = SymbolBASICTYPE(TYPE.ulong)
integer = SymbolBASICTYPE(TYPE.integer)
fixed = SymbolBASICTYPE(TYPE.fixed)
float_ = SymbolBASICTYPE(TYPE.float_)
string = SymbolBASICTYPE(TYPE.string)
types = [unknown,
ubyte, byte_,
uinteger, integer,
ulong, long_,
fixed, float_,
string]
_by_name = {x.name: x for x in types}
@staticmethod
def size(t):
assert isinstance(t, SymbolTYPE)
return t.size
@staticmethod
def to_string(t):
assert isinstance(t, SymbolTYPE)
return t.name
@classmethod
def by_name(cls, typename):
""" Converts a given typename to Type
"""
return cls._by_name.get(typename, None)
@classproperty
def integrals(cls):
return (cls.byte_, cls.ubyte, cls.integer, cls.uinteger,
cls.long_, cls.ulong)
@classproperty
def signed(cls):
return cls.byte_, cls.integer, cls.long_, cls.fixed, cls.float_
@classproperty
def unsigned(cls):
return cls.ubyte, cls.uinteger, cls.ulong
@classproperty
def decimals(cls):
return cls.fixed, cls.float_
@classproperty
def numbers(cls):
return tuple(list(cls.integrals) + list(cls.decimals))
@classmethod
def is_numeric(cls, t):
assert isinstance(t, SymbolTYPE)
return t.final in cls.numbers
@classmethod
def is_signed(cls, t):
assert isinstance(t, SymbolTYPE)
return t.final in cls.signed
@classmethod
def is_unsigned(cls, t):
assert isinstance(t, SymbolTYPE)
return t.final in cls.unsigned
@classmethod
def is_integral(cls, t):
assert isinstance(t, SymbolTYPE)
return t.final in cls.integrals
@classmethod
def is_decimal(cls, t):
assert isinstance(t, SymbolTYPE)
return t.final in cls.decimals
@classmethod
def is_string(cls, t):
assert isinstance(t, SymbolTYPE)
return t.final == cls.string
@classmethod
def to_signed(cls, t):
""" Return signed type or equivalent
"""
assert isinstance(t, SymbolTYPE)
t = t.final
assert t.is_basic
if cls.is_unsigned(t):
return {cls.ubyte: cls.byte_,
cls.uinteger: cls.integer,
cls.ulong: cls.long_}[t]
if cls.is_signed(t) or cls.is_decimal(t):
return t
return cls.unknown
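# Illustrative sketch (not part of the original source): a few of the helpers
# above used as class-level utilities.
#
#     Type.size(Type.uinteger)       # -> 2 (bytes)
#     Type.is_signed(Type.byte_)     # -> True
#     Type.is_unsigned(Type.ubyte)   # -> True
#     Type.to_signed(Type.ulong)     # -> Type.long_
#     Type.by_name('float')          # -> Type.float_ (if that is its TYPE name)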
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/type_.py
|
type_.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: ts=4:et:sw=4:
# ----------------------------------------------------------------------
# Copyleft (K), Jose M. Rodriguez-Rosa (a.k.a. Boriel)
#
# This program is Free Software and is released under the terms of
# the GNU General License
# ----------------------------------------------------------------------
from .symbol_ import Symbol
from .typecast import SymbolTYPECAST
from .var import SymbolVAR
from api.config import OPTIONS
from api.constants import SCOPE
from api.constants import CLASS
class SymbolARGUMENT(Symbol):
""" Defines an argument in a function call
"""
def __init__(self, value, lineno, byref=None):
""" Initializes the argument data. Byref must be set
to True if this Argument is passed by reference.
"""
super(SymbolARGUMENT, self).__init__(value)
self.lineno = lineno
self.byref = byref if byref is not None else OPTIONS.byref.value
@property
def t(self):
if self.byref or not self.type_.is_dynamic:
return self.value.t
if self.value.token in ('VAR', 'PARAMDECL'):
if self.value.scope == SCOPE.global_:
return self.value.t
else:
return self.value.t[1:] # Removed '$' prefix
return self.value.t
@property
def value(self):
return self.children[0]
@value.setter
def value(self, val):
self.children[0] = val
@property
def type_(self):
return self.value.type_
@property
def class_(self):
return getattr(self.value, 'class_', CLASS.unknown)
@property
def byref(self):
return self._byref
@byref.setter
def byref(self, value):
if value:
assert isinstance(self.value, SymbolVAR)
self._byref = value
@property
def mangled(self):
return self.value.mangled
@property
def size(self):
return self.type_.size
def __hash__(self):
return id(self)
def __eq__(self, other):
assert isinstance(other, Symbol)
if isinstance(other, SymbolARGUMENT):
return self.value == other.value
return self.value == other
def typecast(self, type_):
""" Test type casting to the argument expression.
On success changes the node value to the new typecast, and returns
True. On failure, returns False, and the node value is set to None.
"""
self.value = SymbolTYPECAST.make_node(type_, self.value, self.lineno)
return self.value is not None
|
zxbasic
|
/zxbasic-1.12.0.tar.gz/zxbasic-1.12.0/symbols/argument.py
|
argument.py
|