LeNet model from
`"Gradient-based learning applied to document recognition" <http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf>`_
Args:
num_classes (int): number of classes. Default: 10
.. note::
The input image size must be 28 x 28. | def lenet(pretrained=False, **kwargs):
"""LeNet model from
`"Gradient-based learning applied to document recognition" <http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf>`_
Args:
num_classes (int): number of classes. Default: 10
.. note::
The input image size must be 28 x 28.
"""
return LeNet(**kwargs) |
DTN model
Args:
num_classes (int): number of classes. Default: 10
.. note::
The input image size must be 32 x 32. | def dtn(pretrained=False, **kwargs):
""" DTN model
Args:
num_classes (int): number of classes. Default: 10
.. note::
The input image size must be 32 x 32.
"""
return DTN(**kwargs) |
ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs) |
ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs) |
ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs) |
ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs) |
ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs) |
ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs) |
ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs) |
Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs) |
Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice as large in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs) |
Constructs a Simple Baseline model with a ResNet-101 backbone.
Args:
num_keypoints (int): number of keypoints
pretrained_backbone (bool, optional): If True, returns a model pre-trained on ImageNet. Default: True.
deconv_with_bias (bool, optional): Whether to use bias in the deconvolution layer. Default: False
finetune (bool, optional): Whether to use a 10x smaller learning rate for the backbone. Default: False
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default: True | def pose_resnet101(num_keypoints, pretrained_backbone=True, deconv_with_bias=False, finetune=False, progress=True, **kwargs):
"""Constructs a Simple Baseline model with a ResNet-101 backbone.
Args:
num_keypoints (int): number of keypoints
pretrained_backbone (bool, optional): If True, returns a model pre-trained on ImageNet. Default: True.
deconv_with_bias (bool, optional): Whether to use bias in the deconvolution layer. Default: False
finetune (bool, optional): Whether to use a 10x smaller learning rate for the backbone. Default: False
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default: True
"""
return _pose_resnet('resnet101', num_keypoints, Bottleneck, [3, 4, 23, 3], pretrained_backbone, deconv_with_bias, finetune, progress, **kwargs) |
3x3 convolution with padding | def conv3x3(in_planes, out_planes, dilation=1):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
padding=dilation,
dilation=dilation) |
Compute pairwise Euclidean distance between two sets of features | def pairwise_euclidean_distance(x, y):
"""Compute pairwise Euclidean distance between two sets of features"""
m, n = x.size(0), y.size(0)
dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + \
torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() \
- 2 * torch.matmul(x, y.t())
# for numerical stability
dist_mat = dist_mat.clamp(min=1e-12).sqrt()
return dist_mat |
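A minimal usage sketch of pairwise_euclidean_distance, assuming only torch and the function above; the feature dimensions are arbitrary.
import torch

x = torch.randn(4, 128)  # 4 probe features
y = torch.randn(6, 128)  # 6 gallery features
dist = pairwise_euclidean_distance(x, y)
assert dist.shape == (4, 6)
# Spot-check one entry against a direct norm computation.
assert torch.isclose(dist[0, 0], torch.norm(x[0] - y[0]), atol=1e-4)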
Select hard positives and hard negatives according to `In defense of the Triplet Loss for Person
Re-Identification (ICCV 2017) <https://arxiv.org/pdf/1703.07737v2.pdf>`_
Args:
dist_mat (tensor): pairwise distance matrix between two sets of features
identity_mat (tensor): a matrix of shape :math:`(N, M)`. If two images :math:`P[i]` of set :math:`P` and
:math:`Q[j]` of set :math:`Q` come from the same person, then :math:`identity\_mat[i, j] = 1`,
otherwise :math:`identity\_mat[i, j] = 0`
return_idxes (bool, optional): if True, also return indexes of hard examples. Default: False | def hard_examples_mining(dist_mat, identity_mat, return_idxes=False):
r"""Select hard positives and hard negatives according to `In defense of the Triplet Loss for Person
Re-Identification (ICCV 2017) <https://arxiv.org/pdf/1703.07737v2.pdf>`_
Args:
dist_mat (tensor): pairwise distance matrix between two sets of features
identity_mat (tensor): a matrix of shape :math:`(N, M)`. If two images :math:`P[i]` of set :math:`P` and
:math:`Q[j]` of set :math:`Q` come from the same person, then :math:`identity\_mat[i, j] = 1`,
otherwise :math:`identity\_mat[i, j] = 0`
return_idxes (bool, optional): if True, also return indexes of hard examples. Default: False
"""
# The implementation here is a little tricky: dist_mat contains the pairwise distances between each probe image and
# the other images in the current mini-batch. As we want to select positive examples of the same person, we add a
# large negative offset to images of **different** persons before sorting in descending order. As a result, images
# of the **same** person rank first.
sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + (-1e7) * (1 - identity_mat), dim=1,
descending=True)
dist_ap = sorted_dist_mat[:, 0]
hard_positive_idxes = sorted_idxes[:, 0]
# The implementation below is similar: we add a large positive offset to images of the same person and sort in
# ascending order. As a result, images of **different** persons rank first.
sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + 1e7 * identity_mat, dim=1,
descending=False)
dist_an = sorted_dist_mat[:, 0]
hard_negative_idxes = sorted_idxes[:, 0]
if return_idxes:
return dist_ap, dist_an, hard_positive_idxes, hard_negative_idxes
return dist_ap, dist_an |
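A hedged sketch of how hard_examples_mining might feed a triplet loss, assuming the two helpers above are in scope; the batch contents and the 0.3 margin are illustrative only.
import torch
import torch.nn.functional as F

features = torch.randn(8, 128)
labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])
dist_mat = pairwise_euclidean_distance(features, features)
# identity_mat[i, j] = 1 when samples i and j share the same identity.
identity_mat = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()
dist_ap, dist_an = hard_examples_mining(dist_mat, identity_mat)
# Hardest-positive / hardest-negative distances plugged into a margin loss.
loss = F.relu(dist_ap - dist_an + 0.3).mean()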
Constructs a Reid-ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def reid_resnet18(pretrained=False, progress=True, **kwargs):
r"""Constructs a Reid-ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _reid_resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs) |
Constructs a Reid-ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def reid_resnet34(pretrained=False, progress=True, **kwargs):
r"""Constructs a Reid-ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _reid_resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs) |
Constructs a Reid-ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def reid_resnet50(pretrained=False, progress=True, **kwargs):
r"""Constructs a Reid-ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _reid_resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs) |
Constructs a Reid-ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr | def reid_resnet101(pretrained=False, progress=True, **kwargs):
r"""Constructs a Reid-ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _reid_resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs) |
Constructs a DeepLabV2 model with a ResNet-101 backbone.
Args:
num_classes (int, optional): number of classes. Default: 19
pretrained_backbone (bool, optional): If True, returns a model pre-trained on ImageNet. Default: True. | def deeplabv2_resnet101(num_classes=19, pretrained_backbone=True):
"""Constructs a DeepLabV2 model with a ResNet-101 backbone.
Args:
num_classes (int, optional): number of classes. Default: 19
pretrained_backbone (bool, optional): If True, returns a model pre-trained on ImageNet. Default: True.
"""
backbone = ResNet(Bottleneck, [3, 4, 23, 3])
if pretrained_backbone:
# download from Internet
saved_state_dict = load_state_dict_from_url(model_urls['deeplabv2_resnet101'], map_location=lambda storage, loc: storage, file_name="deeplabv2_resnet101.pth")
new_params = backbone.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[1] == 'layer5':
new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
backbone.load_state_dict(new_params)
classifier = ASPP_V2(2048, [6, 12, 18, 24], [6, 12, 18, 24], num_classes)
return Deeplab(backbone, classifier, num_classes) |
Wrap a transform for classification into a transform for keypoint detection.
Note that the keypoint detection label stays the same before and after wrapping.
Args:
transform (class, callable): transform for classification
Returns:
transform for keypoint detection | def wrapper(transform: ClassVar):
""" Wrap a transform for classification to a transform for keypoint detection.
Note that the keypoint detection label will keep the same before and after wrapper.
Args:
transform (class, callable): transform for classification
Returns:
transform for keypoint detection
"""
class WrapperTransform(transform):
def __call__(self, image, **kwargs):
image = super().__call__(image)
return image, kwargs
return WrapperTransform |
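A small sketch of the wrapper in use; torchvision's ColorJitter stands in for an arbitrary classification transform and is only an illustrative choice.
import numpy as np
from PIL import Image
from torchvision import transforms as T

KeypointColorJitter = wrapper(T.ColorJitter)
jitter = KeypointColorJitter(brightness=0.3)
image = Image.new('RGB', (64, 64))
keypoint2d = np.zeros((16, 2))
# The image is transformed; keyword arguments such as keypoint2d pass through untouched.
image, extra = jitter(image, keypoint2d=keypoint2d)
assert np.array_equal(extra['keypoint2d'], keypoint2d)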
Crop the given PIL Image and resize it to desired size.
Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
Args:
img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
top (int): Vertical component of the top left corner of the crop box.
left (int): Horizontal component of the top left corner of the crop box.
height (int): Height of the crop box.
width (int): Width of the crop box.
size (sequence or int): Desired output size. Same semantics as ``resize``.
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``.
Returns:
PIL Image: Cropped image. | def resized_crop(img, top, left, height, width, size, interpolation=Image.BILINEAR,
keypoint2d: np.ndarray=None, intrinsic_matrix: np.ndarray=None):
"""Crop the given PIL Image and resize it to desired size.
Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
Args:
img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
top (int): Vertical component of the top left corner of the crop box.
left (int): Horizontal component of the top left corner of the crop box.
height (int): Height of the crop box.
width (int): Width of the crop box.
size (sequence or int): Desired output size. Same semantics as ``resize``.
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``.
Returns:
PIL Image: Cropped image.
"""
assert isinstance(img, Image.Image), 'img should be PIL Image'
img, keypoint2d = crop(img, top, left, height, width, keypoint2d)
img, keypoint2d, intrinsic_matrix = resize(img, size, interpolation, keypoint2d, intrinsic_matrix)
return img, keypoint2d, intrinsic_matrix |
Crop the given PIL Image at the center to the desired output size.
Args:
img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
output_size (sequence or int): (height, width) of the crop box. If int,
it is used for both directions
Returns:
PIL Image: Cropped image. | def center_crop(image, output_size, keypoint2d: np.ndarray):
"""Crop the given PIL Image and resize it to desired size.
Args:
img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
output_size (sequence or int): (height, width) of the crop box. If int,
it is used for both directions
Returns:
PIL Image: Cropped image.
"""
width, height = image.size
crop_height, crop_width = output_size
crop_top = int(round((height - crop_height) / 2.))
crop_left = int(round((width - crop_width) / 2.))
return crop(image, crop_top, crop_left, crop_height, crop_width, keypoint2d) |
Wrap a transform for classification into a transform for segmentation.
Note that the segmentation label stays the same before and after wrapping.
Args:
transform (class, callable): transform for classification
Returns:
transform for segmentation | def wrapper(transform: ClassVar):
""" Wrap a transform for classification to a transform for segmentation.
Note that the segmentation label will keep the same before and after wrapper.
Args:
transform (class, callable): transform for classification
Returns:
transform for segmentation
"""
class WrapperTransform(transform):
def __call__(self, image, label):
image = super().__call__(image)
return image, label
return WrapperTransform |
Helper utility to make a tiled field of images from numpy arrays.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows. | def _pack_images(images, rows, cols):
"""Helper utility to make a tiled field of images from numpy arrays.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows.
"""
shape = np.shape(images)
width, height, depth = shape[-3:]
images = np.reshape(images, (-1, width, height, depth))
batch = np.shape(images)[0]
rows = np.minimum(rows, batch)
cols = np.minimum(batch // rows, cols)
images = images[:rows * cols]
images = np.reshape(images, (rows, cols, width, height, depth))
images = np.transpose(images, [0, 2, 1, 3, 4])
images = np.reshape(images, [rows * width, cols * height, depth])
return images |
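A quick shape check for _pack_images, assuming numpy only; the image sizes are arbitrary.
import numpy as np

images = np.random.rand(8, 16, 16, 3)      # eight 16x16 RGB images
tiled = _pack_images(images, rows=2, cols=4)
assert tiled.shape == (2 * 16, 4 * 16, 3)  # a 2x4 grid of tiles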
Convert an operative config string to markdown format. | def markdownify_operative_config_str(string):
"""Convert an operative config string to markdown format."""
# TODO(b/37527917): Total hack below. Implement more principled formatting.
def process(line):
"""Convert a single line to markdown format."""
if not line.startswith('#'):
return ' ' + line
line = line[2:]
if line.startswith('===='):
return ''
if line.startswith('None'):
return ' # None.'
if line.endswith(':'):
return '#### ' + line
return line
output_lines = []
for line in string.splitlines():
procd_line = process(line)
if procd_line is not None:
output_lines.append(procd_line)
return '\n'.join(output_lines) |
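A tiny illustration of the conversion, assuming a gin-style operative config string; the parameter name is made up.
config_str = ('# Parameters for train:\n'
              '# ====\n'
              'train.batch_size = 32\n')
print(markdownify_operative_config_str(config_str))
# '# Parameters for train:' becomes a '#### ' heading, the '====' ruler becomes
# a blank line, and the binding line is indented as a code block.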
Prepare the model. | def prepare_model(model_file, batch_size=1):
"""Prepare the model."""
mode = 'eval' if FLAGS.use_eval_mode else 'predict'
print('Initializing the model in %s mode.' % mode, flush=True)
# Read the model name from the gin file
model_reference = gin.query_parameter(
'trax.supervised.trainer_lib.train.model')
model = model_reference.scoped_configurable_fn(mode=mode)
dec_len = 32 if FLAGS.use_eval_mode else 1
batch_size_pd = max(1, batch_size // jax.local_device_count())
shape11 = shapes.ShapeDtype((batch_size_pd, dec_len), dtype=np.int32)
# shape11 = shapes.ShapeDtype((1, 1), dtype=np.int32)
model.init_from_file(
model_file, weights_only=True, input_signature=(shape11, shape11))
model = tl.Accelerate(model)
initial_state = model.state
vocab = t5_spc_vocab.SentencePieceVocabulary(data.DEFAULT_SPM_PATH)
return vocab, model, initial_state |
Train the RL agent.
Args:
output_dir: Output directory.
n_epochs: Number of epochs to run the training for.
light_rl: deprecated, always True, left out for old gin configs.
light_rl_trainer: which light RL trainer to use (experimental). | def train_rl(
output_dir,
n_epochs=10000,
light_rl=True,
light_rl_trainer=light_trainers.PolicyGradient):
"""Train the RL agent.
Args:
output_dir: Output directory.
n_epochs: Number of epochs to run the training for.
light_rl: deprecated, always True, left out for old gin configs.
light_rl_trainer: which light RL trainer to use (experimental).
"""
del light_rl
tf_np.set_allow_float64(FLAGS.tf_allow_float64)
task = rl_task.RLTask()
env_name = task.env_name
if FLAGS.jax_debug_nans:
config.update('jax_debug_nans', True)
if FLAGS.use_tpu:
config.update('jax_platform_name', 'tpu')
else:
config.update('jax_platform_name', '')
trainer = light_rl_trainer(task=task, output_dir=output_dir)
def light_training_loop():
"""Run the trainer for n_epochs and call close on it."""
try:
logging.info('Starting RL training for %d epochs.', n_epochs)
trainer.run(n_epochs, n_epochs_is_total_epochs=True)
logging.info('Completed RL training for %d epochs.', n_epochs)
trainer.close()
logging.info('Trainer is now closed.')
except Exception as e:
raise e
finally:
logging.info('Encountered an exception, still calling trainer.close()')
trainer.close()
logging.info('Trainer is now closed.')
if FLAGS.jax_debug_nans or FLAGS.disable_jit:
fastmath.disable_jit()
with jax.disable_jit():
light_training_loop()
else:
light_training_loop() |
Returns a `ShapeDtype` signature for the given `obj`.
A signature is either a `ShapeDtype` instance or a tuple of `ShapeDtype`
instances. Note that this function is permissive with respect to its inputs
(accepts lists or tuples or dicts, and underlying objects can be any type
as long as they have shape and dtype attributes) and returns the corresponding
nested structure of `ShapeDtype`.
Args:
obj: An object that has `shape` and `dtype` attributes, or a list/tuple/dict
of such objects.
Returns:
A corresponding nested structure of `ShapeDtype` instances. | def signature(obj):
"""Returns a `ShapeDtype` signature for the given `obj`.
A signature is either a `ShapeDtype` instance or a tuple of `ShapeDtype`
instances. Note that this function is permissive with respect to its inputs
(accepts lists or tuples or dicts, and underlying objects can be any type
as long as they have shape and dtype attributes) and returns the corresponding
nested structure of `ShapeDtype`.
Args:
obj: An object that has `shape` and `dtype` attributes, or a list/tuple/dict
of such objects.
Returns:
A corresponding nested structure of `ShapeDtype` instances.
"""
if isinstance(obj, (list, tuple)):
output = tuple(signature(x) for x in obj)
return output if isinstance(obj, tuple) else list(output)
elif isinstance(obj, dict):
return {k: signature(v) for (k, v) in obj.items()}
else:
return ShapeDtype(obj.shape, obj.dtype) |
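A brief sketch of signature on a nested structure, assuming numpy and the ShapeDtype class from this module.
import numpy as np

x = np.zeros((2, 3), dtype=np.float32)
y = np.zeros((5,), dtype=np.int32)
sig = signature((x, {'mask': y}))
# sig mirrors the input structure: a tuple whose first element is the
# ShapeDtype of x and whose second element is {'mask': ShapeDtype of y}.
assert sig[1]['mask'].shape == (5,)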
Creates a new signature by splicing together any number of signatures.
The splicing effectively flattens the top level input signatures. For
instance, it would perform the following mapping:
- `*sigs: sd1, (sd2, sd3, sd4), (), sd5`
- return: `(sd1, sd2, sd3, sd4, sd5)`
Args:
*sigs: Any number of signatures. A signature is either a `ShapeDtype`
instance or a tuple of `ShapeDtype` instances.
Returns:
A single `ShapeDtype` instance if the spliced signature has one element,
else a tuple of `ShapeDtype` instances. | def splice_signatures(*sigs):
"""Creates a new signature by splicing together any number of signatures.
The splicing effectively flattens the top level input signatures. For
instance, it would perform the following mapping:
- `*sigs: sd1, (sd2, sd3, sd4), (), sd5`
- return: `(sd1, sd2, sd3, sd4, sd5)`
Args:
*sigs: Any number of signatures. A signature is either a `ShapeDtype`
instance or a tuple of `ShapeDtype` instances.
Returns:
A single `ShapeDtype` instance if the spliced signature has one element,
else a tuple of `ShapeDtype` instances.
"""
result_sigs = []
for sig in sigs:
if isinstance(sig, (list, tuple)):
result_sigs.extend(sig)
else:
result_sigs.append(sig)
return result_sigs[0] if len(result_sigs) == 1 else tuple(result_sigs) |
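A short check of the flattening behaviour described above, assuming ShapeDtype and numpy are available.
import numpy as np

sd = lambda shape: ShapeDtype(shape, np.float32)
spliced = splice_signatures(sd((2,)), (sd((3,)), sd((4,))), (), sd((5,)))
assert len(spliced) == 4   # top-level tuples are flattened away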
Asserts that an array has the given shape. | def assert_shape_equals(array, shape):
"""Asserts that an array has the given shape."""
assert array.shape == shape, (
'Invalid shape {}; expected {}.'.format(array.shape, shape)
) |
Asserts that two arrays have the same shapes. | def assert_same_shape(array1, array2):
"""Asserts that two arrays have the same shapes."""
assert_shape_equals(array1, array2.shape) |
Processes TensorFlow-relevant flags. | def _tf_setup_from_flags():
"""Processes TensorFlow-relevant flags."""
if FLAGS.enable_eager_execution:
tf.compat.v1.enable_eager_execution()
if FLAGS.tf_xla:
tf.config.optimizer.set_jit(True)
fastmath.tf.set_tf_xla_forced_compile(FLAGS.tf_xla_forced_compile)
tf.config.optimizer.set_experimental_options({
'pin_to_host_optimization': FLAGS.tf_opt_pin_to_host,
'layout_optimizer': FLAGS.tf_opt_layout,
})
tf_np.set_allow_float64(FLAGS.tf_allow_float64) |
Initializes gin-controlled bindings. | def _gin_parse_configs():
"""Initializes gin-controlled bindings."""
# Imports for configurables
# pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable
from trax import models as _trax_models
from trax import optimizers as _trax_opt
# pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable
configs = FLAGS.config if FLAGS.config is not None else []
# Override with --dataset and --model
if FLAGS.dataset:
configs.append("data_streams.dataset_name='%s'" % FLAGS.dataset)
if FLAGS.data_dir:
configs.append("data_streams.data_dir='%s'" % FLAGS.data_dir)
if FLAGS.model:
configs.append('train.model=@trax.models.%s' % FLAGS.model)
gin.parse_config_files_and_bindings(FLAGS.config_file, configs) |
Returns a path to the output directory. | def _output_dir_or_default():
"""Returns a path to the output directory."""
if FLAGS.output_dir:
output_dir = FLAGS.output_dir
trainer_lib.log('Using --output_dir {}'.format(output_dir))
return os.path.expanduser(output_dir)
# Else, generate a default output dir (under the user's home directory).
try:
dataset_name = gin.query_parameter('data_streams.dataset_name')
except ValueError:
dataset_name = 'random'
output_name = '{model_name}_{dataset_name}_{timestamp}'.format(
model_name=gin.query_parameter('train.model').configurable.name,
dataset_name=dataset_name,
timestamp=datetime.datetime.now().strftime('%Y%m%d_%H%M'),
)
output_dir = os.path.join('~', 'trax', output_name)
output_dir = os.path.expanduser(output_dir)
print()
trainer_lib.log('No --output_dir specified')
trainer_lib.log('Using default output_dir: {}'.format(output_dir))
return output_dir |
Initializes TPU for TensorFlow.
Args:
worker: The BNS address of the remote TPU worker. If it's empty (the default
value), TF will assume the TPU devices are connected to the local host.
protocol: The network protocol used to connect to the TPU worker.
Returns:
The device name of the TPU worker's CPU. | def tf_init_tpu(worker='', protocol=None):
"""Initializes TPU for TensorFlow.
Args:
worker: The BNS address of the remote TPU worker. If it's empty (the default
value), TF will assume the TPU devices are connected to the local host.
protocol: The network protocol used to connect to the TPU worker.
Returns:
The device name of the TPU worker's CPU.
"""
protocol = protocol or 'grpc'
is_local = (worker in ('', 'local'))
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=worker)
if not is_local:
tf.config.experimental_connect_to_cluster(resolver, protocol=protocol)
tf.tpu.experimental.initialize_tpu_system(resolver)
if is_local:
return ''
else:
return '/job:worker' |
Make JAX GPU Cluster. | def _make_jax_gpu_cluster(host_id, server_ip, n_hosts, server_port=5005):
"""Make JAX GPU Cluster."""
addr = f'{server_ip}:{server_port}'
if host_id == 0:
logging.info('starting service on %s', addr)
service = xc.get_distributed_runtime_service(addr, n_hosts)
# We add an explicit call to shutdown the service via atexit as Python
# interpreter may not call the service destructor on process termination.
atexit.register(service.shutdown)
logging.info('connecting to service on %s', addr)
dist_client = xc.get_distributed_runtime_client(addr, host_id)
dist_client.connect()
atexit.register(dist_client.shutdown)
# register dist gpu backend
factory = functools.partial(jax.lib.xla_client.make_gpu_client,
dist_client, host_id)
jax.lib.xla_bridge.register_backend_factory('gpu', factory, priority=300) |
Decorator for input pipeline generators that logs examples at intervals. | def debug_pipeline(f, debug=False, method='pow', log_prefix=None):
"""Decorator for input pipeline generators that logs examples at intervals."""
if not debug:
return f
assert method in ('pow', 'every')
@functools.wraps(f)
def wrapper(*args, **kwargs):
count = 0
prefix = log_prefix or f.__name__
for example in f(*args, **kwargs):
count += 1
if method == 'every' or (method == 'pow' and (count & count - 1 == 0)):
logging.info('%s example[%d] = %r', prefix, count, example)
yield example
return wrapper |
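A sketch of the decorator applied to a toy generator; the 'demo' prefix is arbitrary and the log lines go through absl logging.
import functools

@functools.partial(debug_pipeline, debug=True, log_prefix='demo')
def numbers():
  for i in range(10):
    yield i

# With the default method='pow', the 1st, 2nd, 4th and 8th examples are logged.
_ = list(numbers())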
Combines generator functions into one that runs them serially. | def Serial(*fns): # pylint: disable=invalid-name
"""Combines generator functions into one that runs them serially."""
def composed_fns(generator=None):
for f in fastmath.tree_flatten(fns):
generator = f(generator)
return generator
return composed_fns |
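A minimal pipeline sketch using Serial with the Shuffle and Batch helpers defined below; it assumes the full trax.data module (including pad_to_max_dims) is importable, and the toy stream is illustrative.
import numpy as np

pipeline = Serial(Shuffle(queue_size=16), Batch(batch_size=4))
stream = ((np.array([i]), np.array([i])) for i in range(64))
inputs, targets = next(pipeline(stream))
assert inputs.shape == (4, 1)   # four examples stacked into one batch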
Combines generator functions into one that runs them in parallel.
Args:
fns: a sequence of datasets which are combined in parallel.
counters: a sequence of ints with same length as fns, please see comments on
its use below.
reweight_by_minimum: if set to True, then we re-weight every counter by the
minimal counter. E.g. counters (10000, 100000) are translated to (1, 10)
and hence for every 10 examples from the second dataset we are getting
1 example from the first dataset. Without reweighting we would first see
20 thousand examples from the first and second datasets and then 90 thousand
examples only from the second dataset.
gradually_reweight: if set to True, then we loop through the generators
using a recursive rule defined in emit_examples. First we sort generators
by the counters. If we have datasets with counters 1, 20, 40
(after sorting) then we yield examples (a(b c^2)^20)^*, where examples of
type a come from the first dataset, of type b from the second and of type
c from the third. The exponents are obtained through divisions of
subsequent counters.
use_remainders: if set to True as well as gradually_reweight is set to
True and counters are 1, 20, 45 then after dealing with all examples in
the format (a(b c^2)^20)^*, the generator yields the remaining 5 examples
from the dataset with counter 45.
Returns:
parallel_generator: the generator yields samples according to the given counters;
if counters are not given then samples are generated uniformly.
Example 1:
gen = data.Parallel([dataset1, dataset2, dataset3], counters=(2, 1, 3))
defines a generator that yields 33% examples from dataset1, 16% examples from
dataset2 and 50% examples from dataset3.
Example 2:
gen = data.Parallel([dataset1, dataset2, dataset3], counters=(20, 50, 30))
defines a generator that yields 20% examples from dataset1, 50% examples from
dataset2 and 30% examples from dataset3. | def Parallel( # pylint: disable=invalid-name
fns=None,
counters=None,
reweight_by_minimum=False,
gradually_reweight=False,
use_remainders=False):
"""Combines generator functions into one that runs them in parallel.
Args:
fns: a sequence of datasets which are combined in parallel.
counters: a sequence of ints with same length as fns, please see comments on
its use below.
reweight_by_minimum: if set to True, then we re-weight every counter by the
minimal counter. E.g. counters (10000, 100000) are translated to (1, 10)
and hence for every 10 examples from the second dataset we are getting
1 example from the first dataset. Without reweighting we would first see
20 thousand examples from the first and second datasets and then 90 thousand
examples only from the second dataset.
gradually_reweight: if set to True, then we loop through the generators
using a recursive rule defined in emit_examples. First we sort generators
by the counters. If we have datasets with counters 1, 20, 40
(after sorting) then we yield examples (a(b c^2)^20)^*, where examples of
type a come from the first dataset, of type b from the second and of type
c from the third. The exponents are obtained through divisions of
subsequent counters.
use_remainders: if set to True as well as gradually_reweight is set to
True and counters are 1, 20, 45 then after dealing with all examples in
the format (a(b c^2)^20)^*, the generator yields the remaining 5 examples
from the dataset with counter 45.
Returns:
parallel_generator: the generator yields samples according to the given counters;
if counters are not given then samples are generated uniformly.
Example 1:
gen = data.Parallel([dataset1, dataset2, dataset3], counters=(2, 1, 3))
defines a generator that yields 33% examples from dataset1, 16% examples from
dataset2 and 50% examples from dataset3.
Example 2:
gen = data.Parallel([dataset1, dataset2, dataset3], counters=(20, 50, 30))
defines a generator that yields 20% examples from dataset1, 50% examples from
dataset2 and 30% examples from dataset3.
"""
if counters:
assert len(counters) == len(fns)
# Remove generators with zero counters
counters = list(counters)
fns = list(fns)
non_zeros = [j for j in range(len(counters)) if counters[j] != 0]
counters = [counters[j] for j in non_zeros]
fns = [fns[j] for j in non_zeros]
else:
counters = [1] * len(fns)
if reweight_by_minimum:
counters = [math.floor(counter / min(counters)) for counter in counters]
def emit_examples(sorted_counters_with_gens, prev_counter):
if sorted_counters_with_gens:
_, counter, generator = sorted_counters_with_gens[0]
repeats = math.floor(counter / prev_counter)
for _ in range(repeats):
yield next(generator)
yield from emit_examples(sorted_counters_with_gens[1:], counter)
def parallel_generator(gen=None):
# If gradually_reweight is set to False then
# current_counters are increased step by step; they are reset to 0s when
# current_counters[idx] == counters[idx] for all idx. See
# test_parallel_with_weights_three_datasets for an example of how
# current_counters are changed during computation.
# If gradually_reweight is set to True then we loop using a
# recursive rule defined in emit_examples.
generators = []
for f in fns:
if gen:
generators.append(f(gen))
else:
# This handles the case when the function f cannot be
# called on None.
generators.append(f())
if gradually_reweight:
counters_with_gens = zip(range(len(generators)), counters, generators)
sorted_counters_with_gens = sorted(counters_with_gens, key=lambda x: x[1])
while True:
yield from emit_examples(sorted_counters_with_gens, min(counters))
if use_remainders:
# Below we are dealing with remainders.
fractions = []
for i in range(len(sorted_counters_with_gens)):
_, counter, generator = sorted_counters_with_gens[i]
processed = 1
for fraction in fractions:
processed *= fraction
remainder = counter - processed
for _ in range(remainder):
yield next(generator)
if i < len(sorted_counters_with_gens) - 1:
_, next_counter, _ = sorted_counters_with_gens[i + 1]
fractions.append(math.floor(next_counter / counter))
else:
current_counters = [0] * len(generators)
while True:
for idx, generator in enumerate(generators):
if current_counters[idx] < counters[idx]:
current_counters[idx] += 1
# instead of checking current_counters[idx] == counters[idx] for
# all idx, we check the equivalent condition:
if sum(current_counters) == sum(counters):
current_counters = [0] * len(generators)
yield next(generator)
return parallel_generator |
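A toy sketch of Parallel with two infinite generators; the 2:1 counters mean roughly two samples from the first source per sample from the second (after a short warm-up the pattern settles to a, a, b).
import itertools

def source_a():
  while True:
    yield 'a'

def source_b():
  while True:
    yield 'b'

gen = Parallel([source_a, source_b], counters=(2, 1))()
print(list(itertools.islice(gen, 9)))
# roughly two 'a' samples for every 'b', e.g. ['a', 'b', 'a', 'b', 'a', 'a', 'b', 'a', 'a']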
Returns a shuffle function with the given queue size. | def Shuffle(queue_size=1024): # pylint: disable=invalid-name
"""Returns a shuffle function with the given queue size."""
return lambda g: shuffle(g, queue_size) |
Returns a batching function with given batch size. | def Batch(batch_size): # pylint: disable=invalid-name
"""Returns a batching function with given batch size."""
return lambda g: batch(g, batch_size) |
Duplicates (copies) the top element (inputs).
The generator stream is augmented in the following way:
- If the stream consists of a single element `(inputs, )`,
the inputs simply get copied to `(inputs, inputs)`.
- If the stream consists of multiple elements, for example
`(inputs, weights)`, the rest of elements get moved toward
the right side `(inputs, inputs, weights)`.
Returns:
the duplicating function. | def Dup(): # pylint: disable=invalid-name
"""Duplicates (copies) the top element (inputs).
The generator stream is augmented in the following way:
- If the stream consists of a single element `(inputs, )`,
the inputs simply get copied to `(inputs, inputs)`.
- If the stream consists of multiple elements, for example
`(inputs, weights)`, the rest of elements get moved toward
the right side `(inputs, inputs, weights)`.
Returns:
the duplicating function.
"""
def _copy(xs):
x, *rest = xs
return (x, x, *rest)
return lambda g: map(lambda x: _copy(x), g) |
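A tiny check of Dup on a single-element stream.
import numpy as np

stream = iter([(np.arange(3),)])
x1, x2 = next(Dup()(stream))
assert np.array_equal(x1, x2)   # the inputs are simply copied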
Filters empty examples.
Filters any example that has an array of size (0,) (if axes=None).
Alternatively, checks only axes provided in `axes' list. Contrary to
FilterByLength used with several elements with length_axis, here the example
would be filtered if ANY of the dimensions listed in `axes' contains an empty
array.
Args:
axes: list of indices to check, if None, all of them.
debug: If true, emits a log every time we filter out an empty example.
Returns:
Function filtering empty examples. | def FilterEmptyExamples(axes=None, debug=False): # pylint: disable=invalid-name
"""Filters empty examples.
Filters any example that has an array of size (0,) (if axes=None).
Alternatively, checks only axes provided in `axes' list. Contrary to
FilterByLength used with several elements with length_axis, here the example
would be filtered if ANY of the dimensions listed in `axes' contains an empty
array.
Args:
axes: list of indices to check, if None, all of them.
debug: If true, emits a log every time we filter out an empty example.
Returns:
Function filtering empty examples.
"""
def _filter_examples(generator):
for example in generator:
correct = True
for i, unused_tuple_element in enumerate(example):
if axes is None or i in axes:
if example[i].shape == (0,):
correct = False
break
if correct:
yield example
elif debug:
logging.info('Filtered example: %r', example)
return _filter_examples |
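A small sketch showing an example with an empty array being dropped; numpy only.
import numpy as np

stream = iter([
    (np.array([1, 2]), np.array([3])),
    (np.array([]), np.array([7])),   # empty first feature -> filtered out
])
kept = list(FilterEmptyExamples()(stream))
assert len(kept) == 1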
Returns a function that filters out examples by length.
Args:
max_length: int. If not None, indicates maximum length.
min_length: int. If not None, indicates minimum length.
length_keys: (list) which example keys to take into account.
length_axis: which shape axis to take into account.
Returns:
a function that filters out examples by length. | def FilterByLength(max_length, min_length=0, # pylint: disable=invalid-name
length_keys=None, length_axis=0):
"""Returns a function that filters out examples by length.
Args:
max_length: int. If not None, indicates maximum length.
min_length: int. If not None, indicates minimum length.
length_keys: (list) which example keys to take into account.
length_axis: which shape axis to take into account.
Returns:
a function that filters out examples by length.
"""
assert max_length is not None or min_length is not None
length_keys = length_keys or [0, 1]
length_fn = lambda x: _length_fn(x, length_axis, length_keys)
def filtered(gen):
for example in gen:
example_len = length_fn(example)
# Checking max length boundary.
if max_length is not None:
if example_len > max_length:
continue
# Checking min length boundary.
if min_length is not None:
if example_len < min_length:
continue
# Within bounds.
yield example
return filtered |
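A brief sketch of FilterByLength; it relies on the module's _length_fn helper (not shown in this excerpt), which takes the maximum length over the given keys.
import numpy as np

stream = iter([
    (np.arange(5), np.arange(5)),       # length 5 -> kept
    (np.arange(300), np.arange(300)),   # length 300 -> dropped
])
kept = list(FilterByLength(max_length=256)(stream))
assert len(kept) == 1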
Returns a stream function that resizes items as specified by ``len_map``.
Args:
len_map: Dictionary that specifies maximum shapes for potentially multiple
features per stream item. For example, given a stream of tokenized
string pairs, one could enforce a maximum length of 256 tokens for each
string by using ``len_map={0: (256,), 1: (256,)}``. | def TruncateToLength(len_map=None): # pylint: disable=invalid-name
"""Returns a stream function that resizes items as specified by ``len_map``.
Args:
len_map: Dictionary that specifies maximum shapes for potentially multiple
features per stream item. For example, given a stream of tokenized
string pairs, one could enforce a maximum length of 256 tokens for each
string by using ``len_map={0: (256,), 1: (256,)}``.
"""
@debug_data_pipeline.debug_pipeline
def _truncate_to_length(generator):
for example in generator:
if isinstance(example, np.ndarray):
example = (example,)
if isinstance(example, (list, tuple)):
example = list(example)
if len_map is not None:
for key, max_len in len_map.items():
example_len = example[key].shape
if example_len > max_len:
example[key] = np.resize(example[key], max_len)
output = tuple(example)
else:
output = None
raise ValueError(f'Unknown example type: {example}')
yield output
return _truncate_to_length |
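A minimal check of TruncateToLength, assuming the debug_data_pipeline.debug_pipeline decorator above is a no-op when debugging is off.
import numpy as np

truncate = TruncateToLength(len_map={0: (256,)})
stream = iter([(np.arange(300), np.arange(10))])
x, y = next(truncate(stream))
assert x.shape == (256,) and y.shape == (10,)   # only key 0 is resized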
Pads the values to lengths given in `len_map'.
len_map contains a dictionary of example keys to dimension sizes.
Args:
len_map: dict of int to int, we pad examples to lengths
given by the values of the dict. If multiple is True, the dimensions are
padded to multiple of this value.
pad_value: dict of int to int. The value gets applied to
constant_values on numpy.pad per given dimension.
multiple: boolean. If False, pads to the value of len_map. If True, pads to
closest multiple of value of len_map.
Returns:
Function to pad examples to given lengths. | def PadToLength( # pylint: disable=invalid-name
len_map=None, pad_value=0, multiple=False):
"""Pads the values to lengths given in `len_map'.
len_map contains a dictionary of example keys to dimension sizes.
Args:
len_map: dict of int to int, we pad examples to lengths
given by the values of the dict. If multiple is True, the dimensions are
padded to multiple of this value.
pad_value: dict of int to int. The value gets applied to
constant_values on numpy.pad per given dimension.
multiple: boolean. If False, pads to the value of len_map. If True, pads to
closest multiple of value of len_map.
Returns:
Function to pad examples to given lengths.
"""
@debug_data_pipeline.debug_pipeline
def _pad_to_length(generator):
for example in generator:
if isinstance(example, (list, tuple)):
example = list(example)
for key, value in len_map.items():
array_length = example[key].shape[0]
if multiple:
padding_len = array_length - ((array_length // value) * value)
else:
padding_len = max([0, value-example[key].shape[0]])
example[key] = np.pad(example[key],
pad_width=(0, padding_len),
mode='constant',
constant_values=pad_value[key])
output = tuple(example)
else:
if not isinstance(example, np.ndarray):
raise ValueError(f'example isn\'t nparray, but should be: {example}')
array_length = example.shape[0]
if multiple:
padding_len = (
array_length - ((array_length // len_map[0]) * len_map[0]))
else:
padding_len = max(0, len_map[0] - array_length)
output = np.pad(example,
pad_width=(0, padding_len),
mode='constant',
constant_values=pad_value[0])
yield output
if len_map is None:
raise ValueError('len_map parameter should be provided.')
return _pad_to_length |
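A short sketch of PadToLength padding both elements of an example up to length 8; the lengths and pad values are arbitrary.
import numpy as np

pad_fn = PadToLength(len_map={0: 8, 1: 8}, pad_value={0: 0, 1: 0})
stream = iter([(np.arange(5), np.arange(3))])
padded_in, padded_tgt = next(pad_fn(stream))
assert padded_in.shape == (8,) and padded_tgt.shape == (8,)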
Returns a function for bucketing inputs, see `bucket_by_length`. | def BucketByLength(boundaries, batch_sizes, # pylint: disable=invalid-name
length_keys=None, length_axis=0, strict_pad_on_len=False):
"""Returns a function for bucketing inputs, see `bucket_by_length`."""
length_keys = length_keys or [0, 1]
# In all cases so far, we use a length function of the following form.
length_fn = lambda x: _length_fn(x, length_axis, length_keys)
return lambda g: bucket_by_length( # pylint: disable=g-long-lambda
g, length_fn, boundaries, batch_sizes, strict_pad_on_len) |
Pipeline that just does MLM. | def MLM(vocab_size=None, # pylint:disable=invalid-name
max_length=None,
noise_density=0.15,
mean_noise_span_length=3.0):
"""Pipeline that just does MLM."""
return Serial(
# Generate sequential chunks.
generate_sequential_chunks(max_length=max_length),
# Generate mask and chunk.
generate_random_noise_mask(
noise_density=noise_density,
mean_noise_span_length=mean_noise_span_length),
# Consume mask and chunk to give (input, targets).
consume_noise_mask(vocab_size=vocab_size),
) |
Chunks examples so as to make inputs/outputs of specified lengths. | def PrefixLM(input_length=128, output_length=512): # pylint:disable=invalid-name
"""Chunks examples so as to make inputs/outputs of specified lengths."""
def _f(generator):
for example in generator:
n_tokens = len(example)
# Iterate:
# |--------|<---- input_length ---->|<- output_length ->|--------------|
# ^ ^ ^ ^
# | | | |
# 0 input_begin_idx input_end_idx output_end_idx
input_begin_idx = 0
# While you can make an input batch, keep going.
while input_begin_idx + input_length < n_tokens:
input_end_idx = input_begin_idx + input_length
output_end_idx = min(input_end_idx + output_length, n_tokens)
yield (example[input_begin_idx:input_end_idx],
example[input_end_idx:output_end_idx])
# Update the indices.
input_begin_idx = output_end_idx
return _f |
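A quick sketch of PrefixLM chunking a single token sequence; the 1000-token example is illustrative.
import numpy as np

tokens = np.arange(1000)
chunker = PrefixLM(input_length=128, output_length=512)
inp, out = next(chunker(iter([tokens])))
assert inp.shape == (128,) and out.shape == (512,)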
Prepares the input needed for training of Language Models.
Each example needs to contain two elements (input and target).
Input is concatenated to target and, if pad_to_length is given, padded to
length provided.
The loss_weights mark only the target, excluding both the input and the padding.
Args:
pad_to_length: int, total length to which the concatenated input and target arrays are padded.
Returns:
Function to return input for a LM. | def ConcatenateToLMInput(pad_to_length=None): # pylint: disable=invalid-name
"""Prepares the input needed for training of Language Models.
Each example needs to contain two elements (input and target).
Input is concatenated to target and, if pad_to_length is given, padded to
length provided.
The loss_weights mark only the target, excluding both the input and the padding.
Args:
pad_to_length: int, total length to which the concatenated input and target arrays are padded.
Returns:
Function to return input for a LM.
"""
@debug_data_pipeline.debug_pipeline
def _concatenate_to_lm_input(generator):
for example in generator:
if isinstance(example, (list, tuple)) and (len(example) == 2):
concatenated = np.concatenate((example[0], example[1]), axis=-1)
loss_weights = np.concatenate((np.zeros_like(example[0]),
np.ones_like(example[1])))
if pad_to_length is not None:
padding_len = pad_to_length - (
example[0].shape[0] + example[1].shape[0])
if padding_len < 0:
raise ValueError(
'Example lengths '
f'({example[0].shape[0]}, {example[1].shape[0]}) '
f'longer than pad_to_length ({pad_to_length}).')
loss_weights = np.pad(loss_weights, (0, padding_len), 'constant')
concatenated = np.pad(concatenated, (0, padding_len), 'constant')
output = (concatenated, concatenated, loss_weights)
elif isinstance(example, (list, tuple)) and (len(example) == 1):
# Make x into (x, x)
output = (example[0], example[0])
elif isinstance(example, np.ndarray):
# Make x into (x, x)
output = (example, example)
else:
output = None
raise ValueError(f'Unknown input to ConcatenateToLMInput: {example}')
yield output
return _concatenate_to_lm_input |
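A small sketch of ConcatenateToLMInput on an (input, target) pair padded to length 16; loss weights are zero over the input and padding, one over the target.
import numpy as np

prep = ConcatenateToLMInput(pad_to_length=16)
stream = iter([(np.ones(4, dtype=np.int32), 2 * np.ones(6, dtype=np.int32))])
inputs, targets, weights = next(prep(stream))
assert inputs.shape == (16,)
assert weights[:4].sum() == 0 and weights[4:10].sum() == 6 and weights[10:].sum() == 0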
Casts the given indices to the given dtype. | def CastTo(dtype=np.int32, indices=(0, 1,), debug=False): # pylint: disable=invalid-name
"""Casts the given indices to the given dtype."""
def _cast_fn(generator):
debug_count = 0
for example in generator:
debug_count += 1
assert isinstance(example, tuple)
example = list(example)
dtype_mismatch = False
original_index_and_dtype = []
for i in range(len(example)):
if i not in indices:
continue
original_type = example[i].dtype
if original_type != dtype:
if not (original_type == np.int64 and dtype == np.int32):
# Downcasting from np.int64 to np.int32 is OK
original_index_and_dtype.append((i, original_type))
example[i] = example[i].astype(dtype)
dtype_mismatch = True
if debug and dtype_mismatch and original_index_and_dtype:
logging.info('dtype mismatch in example[%d] = %r was earlier: %r',
debug_count, example, original_index_and_dtype)
yield tuple(example)
return _cast_fn |
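A quick check of CastTo down-casting both elements to int32.
import numpy as np

stream = iter([(np.arange(3, dtype=np.float64), np.arange(3, dtype=np.int64))])
x, y = next(CastTo(dtype=np.int32)(stream))
assert x.dtype == np.int32 and y.dtype == np.int32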
Appends values provided in `val` to inputs.
`val` is keyed by example keys; its values contain the tensors to append.
Args:
val: dict of int to tensors. Specific keys get the tensors specified in
values appended.
Returns:
Function to append tensors to examples. | def AppendValue(val=None): # pylint: disable=invalid-name
"""Appends values provided in `val` to inputs.
`val` is keyed by example keys; its values contain the tensors to append.
Args:
val: dict of int to tensors. Specific keys get the tensors specified in
values appended.
Returns:
Function to append tensors to examples.
"""
@debug_data_pipeline.debug_pipeline
def _append_value(generator):
for example in generator:
if isinstance(example, tuple):
example = list(example)
if val is not None:
for key, value in val.items():
example[key] = np.append(example[key], value, -1)
output = tuple(example)
else:
if not isinstance(example, np.ndarray):
raise ValueError(f'example isn\'t nparray, but should be: {example}')
output = np.append(example, val[0])
yield output
return _append_value |
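A minimal sketch of AppendValue adding a trailing token per key; the appended values are arbitrary.
import numpy as np

append_fn = AppendValue(val={0: [0], 1: [1]})
stream = iter([(np.array([5, 6]), np.array([7]))])
x, y = next(append_fn(stream))
assert list(x) == [5, 6, 0] and list(y) == [7, 1]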
Returns a function to add loss weights; see `add_loss_weights`. | def AddLossWeights(id_to_mask=None): # pylint: disable=invalid-name
"""Returns a function to add loss weights; see `add_loss_weights`."""
return lambda g: add_loss_weights(g, id_to_mask=id_to_mask) |
Returns a function which unbatches. | def UnBatch(): # pylint: disable=invalid-name
"""Returns a function which unbatches."""
def _unbatch(generator):
for batched_example in generator:
# batched_example is usually like:
# (batched_inputs, batched_outputs) or
# (batched_inputs, batched_outputs, batched_weights)
assert isinstance(batched_example, tuple)
# assert all lengths are the same.
batch_sizes = list(set(map(lambda example: example.shape[0],
batched_example)))
assert len(batch_sizes) == 1
# Now unbatch examples.
for example_idx in range(batch_sizes[0]):
yield tuple(map(lambda x: x[example_idx], batched_example)) # pylint: disable=cell-var-from-loop
return _unbatch |
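A short sketch of UnBatch splitting a batched pair back into four examples.
import numpy as np

batched_stream = iter([(np.zeros((4, 3)), np.ones((4,)))])
examples = list(UnBatch()(batched_stream))
assert len(examples) == 4 and examples[0][0].shape == (3,)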
Pre-fetches a number of examples from generator in a separate process. | def Prefetch(n_prefetch=2): # pylint: disable=invalid-name
"""Pre-fetches a number of examples from generator in a separate process."""
def prefetch(generator):
in_q, out_q = mp.Queue(), mp.Queue()
p = mp.Process(target=_generator_process, args=(generator, in_q, out_q))
for _ in range(n_prefetch):
in_q.put(None)
p.start()
while True:
yield out_q.get()
in_q.put(None)
return prefetch |
Sets each host at (dataset_size/n_hosts)-th of the dataset. | def UniformlySeek(name=None, host_id=None, n_hosts=None, dataset_size=None): # pylint: disable=invalid-name
"""Sets each host at (dataset_size/n_hosts)-th of the dataset."""
if not dataset_size:
dataset_size = 2 ** 18 # 512 * 512
logging.error(
'No dataset size given to Uniformly seek, assuming: %d', dataset_size)
assert name
host_id = jax.process_index() if host_id is None else host_id
n_hosts = n_hosts or jax.host_count()
each_host = int(dataset_size / n_hosts)
def _f(generator):
# Each host seeks to the appropriate point in the dataset.
num_to_seek = int(host_id * each_host)
start_time = time.time()
logging.info('Dataset[%s] host_id[%d] is seeking to position[%d]',
name, host_id, num_to_seek)
for _ in range(num_to_seek):
next(generator)
logging.info('Dataset[%s] host_id[%d] reached position[%d]. '
'Time taken [%s] seconds',
name, host_id, num_to_seek, time.time() - start_time)
for example in generator:
yield example
return _f |
Returns a function that counts and skips examples (see above). | def CountAndSkip(name): # pylint: disable=invalid-name
"""Returns a function that counts and skips examples (see above)."""
return lambda g: count_and_skip(g, name) |
Creates a logging component of the input pipeline. | def Log(n_steps_per_example=1, only_shapes=True): # pylint: disable=invalid-name
"""Creates a logging component of the input pipeline."""
def log(stream):
counter = 0
for example in stream:
item_to_log = example
if only_shapes:
item_to_log = fastmath.nested_map(shapes.signature, example)
if counter % n_steps_per_example == 0:
logging.info(str(item_to_log))
print(item_to_log)
counter += 1
yield example
return log |
Shuffles a sample stream using a random-out next-in queue of given size.
Args:
samples: Stream of samples for eventual use as training data or eval data.
queue_size: Minimum number of samples within which the streamed shuffling
takes place.
Yields:
Shuffled stream of samples, ready for further processing, e.g., grouping
into batches. | def shuffle(samples, queue_size):
"""Shuffles a sample stream using a random-out next-in queue of given size.
Args:
samples: Stream of samples for eventual use as training data or eval data.
queue_size: Minimum number of samples within which the streamed shuffling
takes place.
Yields:
Shuffled stream of samples, ready for further processing, e.g., grouping
into batches.
"""
if queue_size < 1:
raise ValueError(f'Arg queue_size ({queue_size}) is less than 1.')
if queue_size == 1:
logging.warning('Queue size of 1 results in no shuffling.')
queue = []
try:
# Prep: fill the queue.
for _ in range(queue_size):
queue.append(next(samples))
# Core streaming shuffle: yield sample from random location in queue, then
# fill that location with new sample from input stream.
for sample in samples:
i = np.random.randint(queue_size)
yield queue[i]
queue[i] = sample
except StopIteration:
# Only get here if the initial queue fill fails.
logging.warning(
'Not enough samples (%d) to fill initial queue (size %d).',
len(queue), queue_size)
# No new samples coming in; shuffle and drain the queue.
np.random.shuffle(queue)
for sample in queue:
yield sample |
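A brief check of the streamed shuffle: the same items come out, just in a different order.
import numpy as np

np.random.seed(0)
shuffled = list(shuffle(iter(range(10)), queue_size=4))
assert sorted(shuffled) == list(range(10))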
Batch and pad generator as in tf.data.Dataset.padded_batch. | def batch(generator, batch_size):
"""Batch and pad generator as in tf.data.Dataset.padded_batch."""
if batch_size <= 0:
raise ValueError(f'Batch size must be positive, but is {batch_size}.')
buf = []
i = 0
for example in generator:
buf.append(example) # Examples are tuples of tensors.
if len(buf) == batch_size:
# buf is a list of tuples, e.g., [(in1, tgt1), (in2, tgt2), (in3, tgt3)]
# batch is a tuple of arrays: ([in1, in2, in3], [tgt1, tgt2, tgt3])
try:
batched_example = tuple(
pad_to_max_dims([np.asarray(tensor) for tensor in x])
for x in zip(*buf))
except ValueError as e:
for j in range(len(buf)):
logging.error('Batch[%d][%d] input shape: %r output shape: %r',
i, j, buf[j][0].shape, buf[j][1].shape)
for j in range(len(buf)):
logging.error('Batch[%d][%d] input: %r', i, j, buf[j][0])
logging.error('Batch[%d][%d] output: %r', i, j, buf[j][1])
raise e
i += 1
yield batched_example
buf = [] |
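For illustration, a hypothetical stream of variable-length (input, target) pairs batched with the function above; padding within each batch is done by `pad_to_max_dims`, defined later in this module:
import numpy as np

examples = ((np.arange(n), np.arange(n + 1)) for n in (3, 5, 4, 2))
for inputs, targets in batch(examples, batch_size=2):
  print(inputs.shape, targets.shape)
# Prints (2, 5) (2, 6) for the first batch and (2, 4) (2, 5) for the second;
# a trailing incomplete batch would be dropped.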
Pad a tuple of tensors to a joint dimension and return their batch.
For example, a pair of tensors of shape (2, 10) and (3, 9) will be padded
to (3, 10) each, and the returned tensor will have shape (2, 3, 10).
When boundary is specified, we try to pad all unknown dimensions to boundary
if possible, which can help reduce the number of different shapes occurring
in the tensors and speed up XLA compilation. So, for example, a pair of
tensors of shapes (8, 10), (8, 9) with boundary=12 will be padded to (8, 12).
One special case occurs when boundary is much higher than the padding length
that we'd use without boundary. For example, tensors (2, 10) and (3, 9) with
boundary=12 could end up padded to (12, 12), but this is very wasteful in
the first dimension. In that case, we will use the closest power-of-2 instead
of the boundary, so we will end up padding to (4, 12) instead of (12, 12).
Args:
tensors: a tuple or list of tensors to pad
boundary: int or None; if given, expand the padded dimensions to this size
strict_pad_on_len: bool; if true we pad on the length dimension, dim[0]
strictly as a multiple of boundary.
Returns:
a tensor, the tensors padded together | def pad_to_max_dims(tensors, boundary=None, strict_pad_on_len=False):
"""Pad a tuple of tensors to a joint dimension and return their batch.
For example, a pair of tensors of shape (2, 10) and (3, 9) will be padded
to (3, 10) each, and the returned tensor will have shape (2, 3, 10).
When boundary is specified, we try to pad all unknown dimensions to boundary
if possible, which can help reduce the number of different shapes occurring
in the tensors and speed up XLA compilation. So, for example, a pair of
tensors of shapes (8, 10), (8, 9) with boundary=12 will be padded to (8, 12).
One special case occurs when boundary is much higher than the padding length
that we'd use without boundary. For example, tensors (2, 10) and (3, 9) with
boundary=12 could end up padded to (12, 12), but this is very wasteful in
the first dimension. In that case, we will use the closest power-of-2 instead
of the boundary, so we will end up padding to (4, 12) instead of (12, 12).
Args:
tensors: a tuple or list of tensors to pad
boundary: int or None; if given, expand the padded dimensions to this size
strict_pad_on_len: bool; if true we pad on the length dimension, dim[0]
strictly as a multiple of boundary.
Returns:
a tensor, the tensors padded together
"""
# TODO(afrozm): Unify this later.
if ((boundary is not None) and
(strict_pad_on_len or isinstance(boundary, (list, tuple)))):
ndim = tensors[0].ndim
if not isinstance(boundary, (list, tuple)):
boundary = [boundary] * ndim
if ndim != len(boundary):
raise ValueError(f'ndim != len(boundary) - '
f'ndim({ndim}) vs boundary({boundary}) '
f'len(boundary) = {len(boundary)}.')
max_len_per_dim = [0] * ndim
for tensor in tensors:
max_len_per_dim = [
max(e, s) for e, s in zip(tensor.shape, max_len_per_dim)]
# Round everything up to a multiple of boundary in the respective dimension.
len_per_dim = [
max_len_per_dim[i] if not b else b * math.ceil(max_len_per_dim[i] / b)
for i, b in enumerate(boundary)]
padded_tensors = [
np.pad(t, [(0, len_per_dim[i] - t.shape[i]) for i in range(ndim)],
mode='constant', constant_values=t.dtype.type(0))
for t in tensors]
return np.stack(padded_tensors)
max_len_to_pad = []
padding_needed = False
dim = len(tensors[0].shape)
for i in range(dim):
max_len = max([t.shape[i] for t in tensors])
min_len = min([t.shape[i] for t in tensors])
if max_len == min_len and max_len == boundary: # No padding needed.
max_len_to_pad.append(max_len)
elif boundary is None:
max_len_to_pad.append(max_len)
padding_needed = True
else:
padding_needed = True
cur_boundary = max(max_len, boundary)
if 2 * max_len < cur_boundary:
cur_boundary = 2**int(np.ceil(np.log2(max_len)))
max_len_to_pad.append(cur_boundary)
if not padding_needed:
return np.stack(tensors)
padded_tensors = []
for t in tensors:
pad_widths = [(0, max_len_to_pad[i] - t.shape[i]) for i in range(dim)]
padded_t = np.pad(t, pad_widths, mode='constant',
constant_values=t.dtype.type(0))
padded_tensors.append(padded_t)
return np.stack(padded_tensors) |
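Two quick checks of the behaviour described above (shapes only; values are zero-padded), assuming `pad_to_max_dims` as defined here:
import numpy as np

print(pad_to_max_dims([np.ones((2, 10)), np.ones((3, 9))]).shape)
# (2, 3, 10) -- each tensor is padded to (3, 10), then the two are stacked.
print(pad_to_max_dims([np.ones((2, 10)), np.ones((3, 9))], boundary=12).shape)
# (2, 4, 12) -- the power-of-2 fallback kicks in for the first dimension.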
Bucket by length, like tf.data.experimental.bucket_by_sequence_length.
This function draws examples from the provided `generator` and puts an
example into a bucket depending on `l = length_fn(example)`. The bucket is
chosen by which pair of `boundaries` l falls between. When a bucket reaches
its batch size, as specified by `batch_sizes`, this function generates a batch
of padded examples from that bucket.
Args:
generator: python generator to draw data from.
length_fn: a function taking the example and returning the length.
boundaries: a list of bucket boundaries.
batch_sizes: a list of batch sizes.
strict_pad_on_len: bool; if true we pad on the length dimension, dim[0]
strictly as a multiple of boundary.
Yields:
An input batch, which comes from one of the buckets. | def bucket_by_length(generator, length_fn, boundaries, batch_sizes,
strict_pad_on_len=False):
"""Bucket by length, like tf.data.experimental.bucket_by_sequence_length.
This function draws examples from the provided `generator` and puts an
example into a bucket depending on `l = length_fn(example)`. The bucket is
chosen by which pair of `boundaries` l falls between. When a bucket reaches
its batch size, as specified by `batch_sizes`, this function generates a batch
of padded examples from that bucket.
Args:
generator: python generator to draw data from.
length_fn: a function taking the example and returning the length.
boundaries: a list of bucket boundaries.
batch_sizes: a list of batch sizes.
strict_pad_on_len: bool; if true we pad on the length dimension, dim[0]
strictly as a multiple of boundary.
Yields:
An input batch, which comes from one of the buckets.
"""
buckets = [[] for _ in range(len(batch_sizes))]
boundaries = boundaries + [math.inf] # Max boundary is unlimited.
for example in generator:
length = length_fn(example)
# `bucket_idx` will always be < len(boundaries), since boundaries is right
# padded by `math.inf`.
bucket_idx = min([i for i, b in enumerate(boundaries) if length <= b])
buckets[bucket_idx].append(example)
if len(buckets[bucket_idx]) == batch_sizes[bucket_idx]:
batched = zip(*buckets[bucket_idx])
boundary = boundaries[bucket_idx]
boundary = None if boundary == math.inf else boundary
padded_batch = tuple(
pad_to_max_dims(x, boundary, strict_pad_on_len) for x in batched)
yield padded_batch
buckets[bucket_idx] = [] |
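A hypothetical run of the bucketing generator above on a tiny stream of (input, target) pairs; `bucket_by_length` and `pad_to_max_dims` are the functions from this module:
import numpy as np

examples = ((np.arange(n), np.arange(n)) for n in (2, 7, 3, 6))
bucketed = bucket_by_length(examples,
                            length_fn=lambda ex: len(ex[0]),
                            boundaries=[4, 8],
                            batch_sizes=[2, 2, 1])
for inputs, targets in bucketed:
  print(inputs.shape, targets.shape)
# Lengths 2 and 3 fill the <=4 bucket first: (2, 4) (2, 4).
# Lengths 7 and 6 then fill the <=8 bucket: (2, 8) (2, 8).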
Add weights to inputs without weights and masks by id if requested.
The generator stream is augmented in the following way:
- If the stream consists of pairs `(inputs, targets)`, a loss mask is added
that is created as a tensor of ones of the same shape as targets.
- If `id_to_mask` is not `None`, and the stream (after the previous point)
has triples `(inputs, targets, weights)`, the weights are multiplied by a
0/1 mask that is 0 iff targets is equal to `id_to_mask` (1 otherwise).
Args:
generator: Stream of tuples.
id_to_mask: If not None, int-valued id that represents padding, as opposed
to true target IDs.
Yields:
Examples from the augmented stream. | def add_loss_weights(generator, id_to_mask=None):
"""Add weights to inputs without weights and masks by id if requested.
The generator stream is augmented in the following way:
- If the stream consists of pairs `(inputs, targets)`, a loss mask is added
that is created as a tensor of ones of the same shape as targets.
- If `id_to_mask` is not `None`, and the stream (after the previous point)
has triples `(inputs, targets, weights)`, the weights are multiplied by a
0/1 mask that is 0 iff targets is equal to `id_to_mask` (1 otherwise).
Args:
generator: Stream of tuples.
id_to_mask: If not None, int-valued id that represents padding, as opposed
to true target IDs.
Yields:
Examples from the augmented stream.
"""
for example in generator:
if len(example) > 3 or len(example) < 2:
assert id_to_mask is None, 'Cannot automatically mask this stream.'
yield example
else:
if len(example) == 2:
weights = np.ones_like(example[1]).astype(np.float32)
else:
weights = example[2].astype(np.float32)
mask = 1.0 - np.equal(example[1], id_to_mask).astype(np.float32)
weights *= mask
output = (example[0], example[1], weights)
yield output |
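A small example of the masking behaviour above, assuming padding id 0 and made-up token values:
import numpy as np

stream = iter([(np.array([3, 4, 5]), np.array([1, 2, 0]))])
inputs, targets, weights = next(add_loss_weights(stream, id_to_mask=0))
print(weights)  # [1. 1. 0.] -- the padded target position gets zero weight.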
Returns a function that generates a random noise mask. | def generate_random_noise_mask(noise_density=0.15,
mean_noise_span_length=3.0,
seed1=None,
seed2=None):
"""Returns a function that generates a random noise mask."""
def _f(generator):
for example in generator:
length = len(example)
noise_mask = random_spans_noise_mask(
length, noise_density=noise_density,
mean_noise_span_length=mean_noise_span_length,
seed1=seed1, seed2=seed2, example=example)
yield (example, noise_mask)
return _f |
Consumes (tokens, noise mask) and returns (inputs, targets). | def consume_noise_mask(vocab_size=32100):
"""Consumes (tokens, noise mask) and returns (inputs, targets)."""
def _noise_span_to_unique_sentinel(tokens, noise_mask):
prev_token_is_noise = np.pad(
noise_mask[:-1], [1, 0], mode='constant', constant_values=False)
first_noise_tokens = np.logical_and(noise_mask,
np.logical_not(prev_token_is_noise))
subsequent_noise_tokens = np.logical_and(noise_mask, prev_token_is_noise)
sentinel = vocab_size - np.cumsum(first_noise_tokens)
tokens = np.where(first_noise_tokens, sentinel, tokens)
return tokens[np.logical_not(subsequent_noise_tokens)]
def _f(generator):
for tokens, noise_mask in generator:
# Returns inputs and targets.
yield (_noise_span_to_unique_sentinel(tokens, noise_mask),
_noise_span_to_unique_sentinel(tokens, np.logical_not(noise_mask)))
return _f |
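A worked example of the two noise-mask helpers above; sentinel ids count down from `vocab_size`, one per contiguous noise span (the token values and mask here are made up for illustration):
import numpy as np

tokens = np.array([10, 11, 12, 13, 14, 15])
noise_mask = np.array([False, True, True, False, False, True])
to_inputs_and_targets = consume_noise_mask(vocab_size=32100)
inputs, targets = next(to_inputs_and_targets(iter([(tokens, noise_mask)])))
print(inputs.tolist())   # [10, 32099, 13, 14, 32098]
print(targets.tolist())  # [32099, 11, 12, 32098, 15]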
Returns a function that generates chunks of length at most max_length. | def generate_sequential_chunks(max_length=None):
"""Returns a function that generates chunks of atmost max_length length."""
def _f(generator):
for example in generator:
n_tokens = len(example)
if n_tokens <= max_length:
yield example
else:
n_segments = int(math.ceil(float(n_tokens) / float(max_length)))
for i in range(n_segments):
start = max_length * i
end = min(start + max_length, n_tokens)
yield example[start:end]
return _f |
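For example, chunking a hypothetical sequence of ten tokens into pieces of at most four:
import numpy as np

chunker = generate_sequential_chunks(max_length=4)
for chunk in chunker(iter([np.arange(10)])):
  print(chunk.tolist())
# [0, 1, 2, 3] then [4, 5, 6, 7] then [8, 9]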
Data stream for the add problem: <S>x+y<S>(x+y).
Args:
vocab_size: how many symbols to use.
batch_size: how large are the batches.
min_length: minimal length of w.
max_length: maximal length of w.
pad_to_multiple: int, pad length to be multiple of this number.
encdec: bool, if True return encoder-decoder style inputs (default: False)
Returns:
python generator of tuples of data examples | def addition_input_stream(
vocab_size=gin.REQUIRED, batch_size=gin.REQUIRED, min_length=gin.REQUIRED,
max_length=gin.REQUIRED, pad_to_multiple=32, encdec=False):
"""Data stream for the add problem: <S>x+y<S>(x+y).
Args:
vocab_size: how many symbols to use.
batch_size: how large are the batches.
min_length: minimal length of w.
max_length: maximal length of w.
pad_to_multiple: int, pad length to be multiple of this number.
encdec: bool, if True return encoder-decoder style inputs (default: False)
Returns:
python generator of tuples of data examples
"""
base = vocab_size - 3 # We use 0 to pad, base+1 as "+" and base+2 as "<S>".
def single_example(max_length, min_length):
"""Generate a stream of random mini-batches."""
add_len = (min_length - 1) // 2
l1 = np.random.randint((max_length - add_len + 1) // 2) + add_len
l2 = np.random.randint(max_length - l1 - 1) + 1
n1 = random_number_lower_endian(l1, base)
n2 = random_number_lower_endian(l2, base)
result = lower_endian_to_number(n1, base) + lower_endian_to_number(
n2, base)
inp = n1 + [base] + n2
tgt = number_to_lower_endian(result, base)
if encdec:
x = [i + 1 for i in inp]
y = [i + 1 for i in tgt]
weights = [1] * len(tgt)
candidate_example = (np.array(x), np.array(y), np.array(weights))
if any(len(sample) > max_length for sample in candidate_example):
# sample too long, try again
return single_example(max_length, min_length)
return (np.array(x), np.array(y), np.array(weights))
else:
x = [base+2] + [i+1 for i in inp] + [base+2] + [i+1 for i in tgt]
weights = ([0] * (len(inp) + 2)) + ([1] * len(tgt))
return (np.array(x), np.array(x), np.array(weights))
def batches(max_length, min_length):
"""Batches of examples."""
if max_length < 3 or min_length < 3:
raise ValueError('Maximum/minimum length must be at least 3.')
while True:
ex = [single_example(max_length, min_length) for _ in range(batch_size)]
padded_batch = [pad_to_max_dims(x, boundary=pad_to_multiple,
strict_pad_on_len=True)
for x in zip(*ex)]
yield tuple(padded_batch)
return batches(max_length, min_length) |
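A hypothetical instantiation of the stream above (the argument values are chosen arbitrarily; with `pad_to_multiple=8` the length axis always comes out as a multiple of 8):
stream = addition_input_stream(vocab_size=13, batch_size=2, min_length=3,
                               max_length=8, pad_to_multiple=8)
x, y, weights = next(stream)
print(x.shape)  # e.g. (2, 8) or (2, 16), depending on the random draw.
# In the default (non-encdec) mode y mirrors x and weights are 1 only on the
# answer digits after the second <S> marker.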
Computes span corruption masks given input parameters. | def random_spans_noise_mask(length,
noise_density=0.15,
mean_noise_span_length=3.0,
seed1=None,
seed2=None,
example=None):
"""Computes span corruption masks given input parameters."""
# Passing this in case if we want to use for debugging/logging
del example
orig_length = length
# increase length to avoid degeneracy
length = max(length, 2)
num_noise_tokens = int(round(length * noise_density))
# avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
num_noise_tokens = min(max(num_noise_tokens, 1), length - 1)
num_noise_spans = int(round(num_noise_tokens / mean_noise_span_length))
# avoid degeneracy by ensuring positive number of noise spans
num_noise_spans = max(num_noise_spans, 1)
num_nonnoise_tokens = length - num_noise_tokens
# Pick the lengths of the noise spans and the non-noise spans
def randomly_segment(num_items, num_segments, seed):
x = np.arange(num_items - 1) < num_segments - 1
# Set random seed if passed (only in tests for now).
if seed is not None:
np.random.seed(seed)
np.random.shuffle(x)
first_in_segment = np.pad(x, (1, 0), mode='constant')
segment_id = np.cumsum(first_in_segment)
y = np.roll(segment_id, 1)
y[0] = 0
idxs = np.pad(np.squeeze(np.argwhere(segment_id - y), axis=1),
(1, 0),
mode='constant')
segment_lengths = np.add.reduceat(np.ones_like(segment_id), idxs, axis=0)
return segment_lengths
noise_span_lengths = randomly_segment(
num_noise_tokens, num_noise_spans, seed1)
nonnoise_span_lengths = randomly_segment(
num_nonnoise_tokens, num_noise_spans, seed2)
interleaved_span_lengths = np.reshape(
np.stack([nonnoise_span_lengths, noise_span_lengths], axis=1),
[num_noise_spans * 2])
span_starts = np.cumsum(interleaved_span_lengths)[:-1]
span_start_indicator = np.zeros(length) # all 0s to begin with
span_start_indicator[span_starts] = 1
span_num = np.cumsum(span_start_indicator)
is_noise = np.equal(span_num % 2, 1)
return is_noise[:orig_length] |
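A quick sanity check of the mask generator above: for length 100 and noise_density 0.15, exactly 15 positions are marked as noise regardless of the random segmentation (seeds are fixed here only for reproducibility):
mask = random_spans_noise_mask(100, noise_density=0.15,
                               mean_noise_span_length=3.0, seed1=0, seed2=1)
print(mask.shape, int(mask.sum()))  # (100,) 15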
Helper function: convert a list of digits in the given base to a number. | def lower_endian_to_number(l, base):
"""Helper function: convert a list of digits in the given base to a number."""
return sum([d * (base**i) for i, d in enumerate(l)]) |
Helper function: convert a number to a list of digits in the given base. | def number_to_lower_endian(n, base):
"""Helper function: convert a number to a list of digits in the given base."""
if n < base:
return [n]
return [n % base] + number_to_lower_endian(n // base, base) |
Helper function: generate a random number as a lower-endian digits list. | def random_number_lower_endian(length, base):
"""Helper function: generate a random number as a lower-endian digits list."""
if length == 1: # Last digit can be 0 only if length is 1.
return [np.random.randint(base)]
prefix = [np.random.randint(base) for _ in range(length - 1)]
return prefix + [np.random.randint(base - 1) + 1] |
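The three helpers above represent numbers as lower-endian digit lists; a short round trip:
digits = number_to_lower_endian(1234, base=10)
print(digits)                                   # [4, 3, 2, 1]
print(lower_endian_to_number(digits, base=10))  # 1234
print(len(random_number_lower_endian(5, 10)))   # 5, last digit never zero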
Count the number of items in the generator, skip already counted ones.
This function counts the number of processed examples and puts it into
the global variable `counters`. This variable can be saved and restored,
and if restored, this function will skip examples until the restored counter
is reached. When the data generator is deterministic, this allows restoring
the data reading process from a checkpoint.
Args:
generator: generator for examples in the dataset.
name: string, a unique id that we use to count the examples
Yields:
The examples from generator, but first skips the number of examples specified
in the global variable data_counters[name] and then increments this variable
every time a new example appears. | def count_and_skip(generator, name):
"""Count the number of items in the generator, skip already counted ones.
This function counts the number of processed examples and puts it into
the global variable `counters`. This variable can be saved and restored,
and if restored, this function will skip examples until the restored counter
is reached. When the data generator is deterministic, this allows restoring
the data reading process from a checkpoint.
Args:
generator: generator for examples in the dataset.
name: string, a unique id that we use to count the examples
Yields:
The examples from generator, but first skips the number of examples specified
in the global variable data_counters[name] and then increments this variable
every time a new example appears.
"""
global data_counters
local_counter = 0
for example in generator:
local_counter += 1
# This check must be inside the loop due to asynchronous initializations.
if name not in data_counters:
data_counters[name] = 0
if local_counter > data_counters[name]:
data_counters[name] += 1
yield example |
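A sketch of how `count_and_skip` supports resuming, assuming the module-level `data_counters` dict used above has been initialized (as it is in this module); the dataset name 'toy_data' is made up:
first_pass = count_and_skip(iter(range(10)), 'toy_data')
print([next(first_pass) for _ in range(3)])  # [0, 1, 2]
print(data_counters['toy_data'])             # 3
# A fresh wrapper over the same deterministic data skips what was counted.
second_pass = count_and_skip(iter(range(10)), 'toy_data')
print(next(second_pass))                     # 3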
Checkpoint data counters. | def save_data_counters(output_dir, host_id=None):
"""Checkpoint data counters."""
global data_counters
host_id = jax.process_index() if host_id is None else host_id
fname = os.path.join(output_dir, 'data_counters%d.pkl' % host_id)
with tf.io.gfile.GFile(fname, 'wb') as f:
pickle.dump(data_counters, f) |
Load data counters from a checkpoint if it exists. | def load_data_counters(output_dir, host_id=None):
"""Load data counters from a checkpoint if it exists."""
global data_counters
host_id = jax.process_index() if host_id is None else host_id
fname = os.path.join(output_dir, 'data_counters%d.pkl' % host_id)
if not tf.io.gfile.exists(fname):
logging.info('Did not load data counters as %s does not exist.', fname)
return
with tf.io.gfile.GFile(fname, 'rb') as f:
obj = pickle.load(f)
data_counters = obj |
Creates heuristically a set of bucket boundaries and sizes.
The middle boundary is set to `bucket_length` and the corresponding batch
size is set to `batch_size`. We also create buckets of 1/2 and 1/4 length
with 2x and 4x batch size, and buckets of 2x and 4x and larger length with
1/2 and 1/4 batch size respectively, and batch size 1 for the final one.
Args:
bucket_length: the length of the middle bucket.
batch_size: the batch size for the middle bucket.
max_eval_length: the longest bucket length if training=False.
n_devices: number of devices, batch sizes are divisible by that.
training: bool, whether we are training or evaluating.
Returns:
a pair of lists of integers, (bucket_boundaries, bucket_batch_sizes). | def _buckets_for_length(bucket_length, batch_size, max_eval_length, n_devices,
training):
"""Creates heuristically a set of bucket boundaries and sizes.
The middle boundary is set to `bucket_length` and the corresponding batch
size is set to `batch_size`. We also create buckets of 1/2 and 1/4 length
with 2x and 4x batch size, and buckets of 2x and 4x and larger length with
1/2 and 1/4 batch size respectively, and batch size 1 for the final one.
Args:
bucket_length: the length of the middle bucket.
batch_size: the batch size for the middle bucket.
max_eval_length: the longest bucket length if training=False.
n_devices: number of devices, batch sizes are divisible by that.
training: bool, whether we are training or evaluating.
Returns:
a pair of lists of integers, (bucket_boundaries, bucket_batch_sizes).
"""
bucket_boundaries = [bucket_length // 4, bucket_length // 2,
bucket_length, bucket_length * 2,
bucket_length * 4, bucket_length * 8,
bucket_length * 16]
if not training:
max_eval_length = max_eval_length or bucket_length * 32
# Set last bucket boundary to be max_eval_length, cut off boundaries
# that are larger than this.
bucket_boundaries = (
[b for b in bucket_boundaries if b < max_eval_length] +
[max_eval_length]
)
bucket_boundaries.append(max_eval_length)
bucket_batch_sizes = [batch_size * 4, batch_size * 2,
batch_size, batch_size // 2,
batch_size // 4, batch_size // 8,
batch_size // 16, 1]
if not training:
# The last bucket batch size is always 1, but the one-but-last is
# sized to accommodate the final length = bucket_boundaries[-1], which
# we changed for eval above -- so adjusting here too.
# Resize if needed, since bucket_batch_sizes may not be the same size
# anymore.
bucket_batch_sizes = bucket_batch_sizes[:len(bucket_boundaries)] + [1]
bucket_batch_sizes[-2] = batch_size // max_eval_length
# Make batch sizes divisible by n_devices.
bucket_batch_sizes = [max(b // n_devices, 1) * n_devices
for b in bucket_batch_sizes]
return (bucket_boundaries, bucket_batch_sizes) |
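For example, the training-time buckets produced by the heuristic above for a middle length of 32 and batch size 64 on a single device:
boundaries, batch_sizes = _buckets_for_length(
    bucket_length=32, batch_size=64, max_eval_length=None, n_devices=1,
    training=True)
print(boundaries)   # [8, 16, 32, 64, 128, 256, 512]
print(batch_sizes)  # [256, 128, 64, 32, 16, 8, 4, 1]
# The final batch size of 1 covers lengths beyond the last boundary.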
Length is the maximum of shape on length_axis over length_keys. | def _length_fn(example, length_axis, length_keys):
"""Length is the maximum of shape on length_axis over length_keys."""
if isinstance(example, (list, tuple)):
return max([example[i].shape[length_axis] for i in length_keys])
return example.shape[length_axis] |
Create Inputs from two streams; mostly for use in gin configs. | def make_inputs(train_stream=gin.REQUIRED, eval_stream=None):
"""Create Inputs from two streams; mostly for use in gin configs."""
if isinstance(train_stream, (list, tuple)):
train_stream = Serial(train_stream)()
if isinstance(eval_stream, (list, tuple)):
eval_stream = Serial(eval_stream)()
eval_stream_fn = None if eval_stream is None else lambda _: eval_stream
return Inputs(train_stream=lambda _: train_stream,
eval_stream=eval_stream_fn) |
Create a stream mostly for use in gin configs for additional tasks. | def make_additional_stream(stream=gin.REQUIRED):
"""Create a stream mostly for use in gin configs for additional tasks."""
return Serial(stream)() |
Create a parallel stream for use in gin configs for additional tasks. | def make_parallel_stream(streams=gin.REQUIRED, counters=None):
"""Create a parallel stream for use in gin configs for additional tasks."""
return Parallel(streams, counters=counters)() |
Batcher: create trax Inputs from single-example data-streams. | def batcher(data_streams=gin.REQUIRED, variable_shapes=True,
batch_size_per_device=32, batch_size=None, eval_batch_size=32,
bucket_length=32, buckets=None,
buckets_include_inputs_in_length=False,
batch_shuffle_size=None, max_eval_length=None,
# TODO(afrozm): Unify padding logic.
id_to_mask=None, strict_pad_on_len=False):
"""Batcher: create trax Inputs from single-example data-streams."""
# TODO(lukaszkaiser, jonni): revisit arguments, their semantics and naming.
# For now leaving the arguments as in batch_fn to reduce gin config changes.
if callable(data_streams): # If we pass a function, e.g., through gin, call.
train_stream, eval_stream = data_streams()
else:
train_stream, eval_stream = data_streams
# pylint: disable=g-long-lambda
batch_train_stream = lambda n_devices: batch_fn(
train_stream(), True, n_devices, variable_shapes,
batch_size_per_device, batch_size, eval_batch_size,
bucket_length, buckets, buckets_include_inputs_in_length,
batch_shuffle_size, max_eval_length, id_to_mask, strict_pad_on_len)
batch_eval_stream = lambda n_devices: batch_fn(
eval_stream(), False, n_devices, variable_shapes,
batch_size_per_device, batch_size, eval_batch_size,
bucket_length, buckets, buckets_include_inputs_in_length,
batch_shuffle_size, max_eval_length, id_to_mask, strict_pad_on_len)
batch_train_eval_stream = lambda n_devices: batch_fn(
train_stream(), False, n_devices, variable_shapes,
batch_size_per_device, batch_size, eval_batch_size,
bucket_length, buckets, buckets_include_inputs_in_length,
batch_shuffle_size, max_eval_length, id_to_mask, strict_pad_on_len)
# pylint: enable=g-long-lambda
return Inputs(train_stream=batch_train_stream,
eval_stream=batch_eval_stream,
train_eval_stream=batch_train_eval_stream) |
Batching function. | def batch_fn(dataset, training, n_devices, variable_shapes,
batch_size_per_device=32, batch_size=None, eval_batch_size=32,
bucket_length=32, buckets=None,
buckets_include_inputs_in_length=False,
batch_shuffle_size=None, max_eval_length=None,
id_to_mask=None, strict_pad_on_len=False):
"""Batching function."""
# TODO(lukaszkaiser, jonni): revisit arguments, their semantics and naming.
# After that, create a proper doc-string; we may also not need to pass both
# training and eval arguments here, as batcher calls the function separately
# now and it's not under gin-config any more -- consider reducing args.
batch_size = batch_size or batch_size_per_device * n_devices
# If bucketing is not specified, check if target shapes are variable.
cur_batch_size = batch_size if training else eval_batch_size
# Make cur_batch_size divisible by n_devices.
cur_batch_size = max(cur_batch_size // n_devices, 1) * n_devices
# Create heuristic buckets if none are specified.
if buckets is None:
logging.info('Heuristically setting bucketing to %s based on shapes '
'of target tensors.', variable_shapes)
if variable_shapes:
buckets = _buckets_for_length(
bucket_length, cur_batch_size, max_eval_length, n_devices, training)
if buckets:
logging.info('Bucketing with buckets %s.', str(buckets))
def example_length(x):
"""The length function used by bucket_by_sequence_length to bucket."""
# The input x is a tuple to go on the stack, typically either
# (input, target) or (input, target, mask).
example_inputs, target = x[0], x[1]
# Length is the shape of axis 0 here (no batch yet).
other_length = 0 # We include input length only if asked.
if buckets_include_inputs_in_length:
other_length = example_inputs.shape[0]
return max(target.shape[0], other_length)
boundaries, batch_sizes = buckets
dataset = bucket_by_length(
dataset, example_length, boundaries, batch_sizes, strict_pad_on_len)
else:
logging.info('Not Bucketing cur_batch_size %d.', cur_batch_size)
dataset = batch(dataset, cur_batch_size)
if training and batch_shuffle_size is not None:
dataset = shuffle(dataset, batch_shuffle_size)
return add_loss_weights(dataset, id_to_mask) |
Make random Inputs for debugging.
Args:
input_shape: the shape of inputs (including batch dimension).
input_dtype: the type of the inputs (int32 by default).
input_range: the range of inputs (defaults to (0, 255)).
output_shape: the shape of outputs (including batch dimension).
output_dtype: the type of the outputs (int32 by default).
output_range: the range of outputs (defaults to (0, 9)).
Returns:
trax.inputs.Inputs | def random_inputs(
input_shape=gin.REQUIRED, input_dtype=jnp.int32, input_range=(0, 255),
output_shape=gin.REQUIRED, output_dtype=jnp.int32, output_range=(0, 9)):
"""Make random Inputs for debugging.
Args:
input_shape: the shape of inputs (including batch dimension).
input_dtype: the type of the inputs (int32 by default).
input_range: the range of inputs (defaults to (0, 255)).
output_shape: the shape of outputs (including batch dimension).
output_dtype: the type of the outputs (int32 by default).
output_range: the range of outputs (defaults to (0, 9)).
Returns:
trax.inputs.Inputs
"""
def random_minibatches(n_devices):
"""Generate a stream of random mini-batches."""
assert input_range[0] % n_devices == 0
if input_dtype in [jnp.float16, jnp.float32, jnp.float64]:
rand = np.random.uniform
else:
rand = np.random.random_integers
while True:
inp = rand(input_range[0], input_range[1], input_shape)
inp = inp.astype(input_dtype)
out = rand(output_range[0], output_range[1], output_shape)
out = out.astype(output_dtype)
yield inp, out
return Inputs(random_minibatches) |
Inputs for the sequence copy problem: 0w0w for w in [1..vocab_size-1]*.
Args:
vocab_size: how many symbols to use.
batch_size: how large are the batches.
train_length: maximum length of w for training.
eval_min_length: minimum length of w for eval.
eval_max_length : maximum length of w for eval.
reverse: bool (optional, false by default): reverse the second sequence.
pad_to_multiple: int, pad length to be multiple of this number.
Returns:
trax.inputs.Inputs | def sequence_copy_inputs(
vocab_size=gin.REQUIRED, batch_size=gin.REQUIRED, train_length=gin.REQUIRED,
eval_min_length=gin.REQUIRED, eval_max_length=gin.REQUIRED, reverse=False,
pad_to_multiple=32):
"""Inputs for the sequence copy problem: 0w0w for w in [1..vocab_size-1]*.
Args:
vocab_size: how many symbols to use.
batch_size: how large are the batches.
train_length: maximum length of w for training.
eval_min_length: minimum length of w for eval.
eval_max_length : maximum length of w for eval.
reverse: bool (optional, false by default): reverse the second sequence.
pad_to_multiple: int, pad length to be multiple of this number.
Returns:
trax.inputs.Inputs
"""
def random_minibatches(length_list):
"""Generate a stream of random mini-batches."""
while True:
length = random.choice(length_list)
assert length % 2 == 0
w_length = (length // 2) - 1
w = np.random.randint(low=1, high=vocab_size-1,
size=(batch_size, w_length))
zero = np.zeros([batch_size, 1], np.int32)
loss_weights = np.concatenate([np.zeros((batch_size, w_length+2)),
np.ones((batch_size, w_length))], axis=1)
if reverse:
x = np.concatenate([zero, w, zero, jnp.flip(w, axis=1)], axis=1)
else:
x = np.concatenate([zero, w, zero, w], axis=1)
x = _pad_to_multiple_of(x, pad_to_multiple, 1)
loss_weights = _pad_to_multiple_of(loss_weights, pad_to_multiple, 1)
yield (x, x, loss_weights) # Here inputs and targets are the same.
train_lengths = [2*(i+2) for i in range(train_length - 1)]
eval_lengths = [2*(i+1) for i in range(eval_min_length, eval_max_length)]
return Inputs(
train_stream=lambda _: random_minibatches(train_lengths),
eval_stream=lambda _: random_minibatches(eval_lengths)
) |
Inputs for the sequence copy problem: w for w in [1..vocab_size-1]*.
Args:
vocab_size: how many symbols to use.
batch_size: how large are the batches.
train_length: maximum length of w for training.
eval_min_length: minimum length of w for eval.
eval_max_length : maximum length of w for eval.
pad_to_multiple: int, pad length to be multiple of this number.
Returns:
trax.inputs.Inputs | def simple_sequence_copy_inputs(
vocab_size=gin.REQUIRED, batch_size=gin.REQUIRED, train_length=gin.REQUIRED,
eval_min_length=gin.REQUIRED, eval_max_length=gin.REQUIRED,
pad_to_multiple=32):
"""Inputs for the sequence copy problem: w for w in [1..vocab_size-1]*.
Args:
vocab_size: how many symbols to use.
batch_size: how large are the batches.
train_length: maximum length of w for training.
eval_min_length: minimum length of w for eval.
eval_max_length : maximum length of w for eval.
pad_to_multiple: int, pad length to be multiple of this number.
Returns:
trax.inputs.Inputs
"""
def random_minibatches(length_list):
"""Generate a stream of random mini-batches."""
while True:
length = random.choice(length_list)
x = np.random.randint(low=1, high=vocab_size-1,
size=(batch_size, length))
loss_weights = np.ones((batch_size, length))
x = _pad_to_multiple_of(x, pad_to_multiple, 1)
loss_weights = _pad_to_multiple_of(loss_weights, pad_to_multiple, 1)
yield (x, x, loss_weights) # Here inputs and targets are the same.
train_lengths = list(range(1, train_length + 1))
eval_lengths = list(range(eval_min_length, eval_max_length + 1))
return Inputs(
train_stream=lambda _: random_minibatches(train_lengths),
eval_stream=lambda _: random_minibatches(eval_lengths)
) |
Inputs for the add problem: <S>x+y<S>(x+y).
Args:
vocab_size: how many symbols to use.
batch_size: how large are the batches.
train_length: maximal length of w for training.
eval_min_length: minimal length of w for eval.
eval_max_length: maximal length of w for eval.
pad_to_multiple: int, pad length to be multiple of this number.
encdec: bool, if True return encoder-decoder style inputs (default: False)
Returns:
trax.inputs.Inputs | def addition_inputs(
vocab_size=gin.REQUIRED, batch_size=gin.REQUIRED, train_length=gin.REQUIRED,
eval_min_length=gin.REQUIRED, eval_max_length=gin.REQUIRED,
pad_to_multiple=32, encdec=False):
"""Inputs for the add problem: <S>x+y<S>(x+y).
Args:
vocab_size: how many symbols to use.
batch_size: how large are the batches.
train_length: maximal length of w for training.
eval_min_length: minimal length of w for eval.
eval_max_length: maximal length of w for eval.
pad_to_multiple: int, pad length to be multiple of this number.
encdec: bool, if True return encoder-decoder style inputs (default: False)
Returns:
trax.inputs.Inputs
"""
train_stream = addition_input_stream(
vocab_size, batch_size, 3, train_length, pad_to_multiple, encdec)
eval_stream = addition_input_stream(
vocab_size, batch_size, eval_min_length, eval_max_length, pad_to_multiple,
encdec)
return Inputs(
train_stream=lambda _: train_stream,
eval_stream=lambda _: eval_stream
) |
Sinusoids of random period and phase.
Args:
batch_size (int): Number of examples in a batch.
length (int): Length of each sequence.
max_phase (float): Maximum phase of the sinusoids.
min_period (float): Minimum period of the sinusoids.
max_period (float): Maximum period of the sinusoids.
Returns:
trax.inputs.Inputs | def sine_inputs(
batch_size=gin.REQUIRED,
length=gin.REQUIRED,
max_phase=(2 * math.pi),
min_period=0.1,
max_period=10.0,
):
"""Sinusoids of random period and phase.
Args:
batch_size (int): Number of examples in a batch.
length (int): Length of each sequence.
max_phase (float): Maximum phase of the sinusoids.
min_period (float): Minimum period of the sinusoids.
max_period (float): Maximum period of the sinusoids.
Returns:
trax.inputs.Inputs
"""
def random_series():
while True:
phase = np.random.uniform(0, max_phase)
period = np.exp(np.random.uniform(np.log(min_period), np.log(max_period)))
x = np.arange(length)
yield np.sin((x - phase) / period)
def random_minibatches(_):
minibatch = []
for series in random_series():
minibatch.append(series)
if len(minibatch) == batch_size:
obs = np.stack(minibatch)
minibatch.clear()
act = np.zeros_like(obs, dtype=np.int32)
mask = np.ones_like(obs)
yield (obs, act, obs, mask)
return Inputs(train_stream=random_minibatches, eval_stream=random_minibatches) |
Pads x to multiple of y on the given axis. | def _pad_to_multiple_of(x, y, axis):
"""Pads x to multiple of y on the given axis."""
pad_len = np.ceil(x.shape[axis] / float(y)) * y
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[axis] = (0, int(pad_len - x.shape[axis]))
return np.pad(x, pad_widths, mode='constant',
constant_values=x.dtype.type(0)) |
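For instance, padding the length axis of a (2, 5) array up to a multiple of 4 with the helper above:
import numpy as np

print(_pad_to_multiple_of(np.ones((2, 5), dtype=np.int32), 4, axis=1).shape)
# (2, 8)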
Strip ids_to_strip from the end of ids. | def strip_ids(ids, ids_to_strip):
"""Strip ids_to_strip from the end of ids."""
ids = list(ids)
while ids and ids[-1] in ids_to_strip:
ids.pop()
return ids |
Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode. | def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_" |
Inverse of _escape_token().
Args:
escaped_token: a unicode string
Returns:
token: a unicode string | def _unescape_token(escaped_token):
"""Inverse of _escape_token().
Args:
escaped_token: a unicode string
Returns:
token: a unicode string
"""
def match(m):
if m.group(1) is None:
return u"_" if m.group(0) == u"\\u" else u"\\"
try:
return six.unichr(int(m.group(1)))
except (ValueError, OverflowError) as _:
return u"\u3013" # Unicode for undefined character.
trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
return _UNESCAPE_REGEX.sub(match, trimmed) |
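A round trip of the two escaping helpers above. This assumes `_UNESCAPE_REGEX` (defined elsewhere in this module) is the usual pattern matching the `\u`, `\\` and `\<number>;` escapes; the sample alphabet below is made up and must contain the backslash for the round trip to hold:
alphabet = set('abcdefghijklmnopqrstuvwxyz_\\')
escaped = _escape_token(u'snake_case!', alphabet)
print(escaped)                   # snake\ucase\33;_
print(_unescape_token(escaped))  # snake_case!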
Checks whether `char` is a whitespace character. | def _bert_is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False |
Checks whether `char` is a control character. | def _bert_is_control(char):
"""Checks whether `char` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False |
Checks whether `char` is a punctuation character. | def _bert_is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False |
Runs basic whitespace cleaning and splitting on a piece of text. | def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens |
Get the T5 data module if available. | def t5_data():
"""Get the T5 data module if available."""
module = None
try:
import t5.data # pylint: disable=g-import-not-at-top
module = t5.data
except AttributeError as e:
logging.error('pip install t5')
raise e
return module |
Creates `(train, eval)` data sources from ``dataset_name``.
Args:
dataset_name: Name of dataset belonging to TFDS or T2T. T2T dataset names
must start with ``'t2t_'``.
data_dir: Directory where the data is located.
preprocess_fn: Function to use for pre-processing after appending targets to
inputs.
bare_preprocess_fn: Function to use for pre-processing before appending
targets to inputs.
shuffle_buffer_size: Size of the shuffle buffer.
eval_holdout_size: If greater than 0, specifies a fraction of training data
to siphon off and use as eval data, in place of a separate eval split.
input_name: Name of the inputs from the dictionary.
target_name: Name of the outputs either from the dictionary or as a result
of post-processing.
Returns:
A pair of functions, `(f, g)` for use as data sources; call `f()` to get an
iterator of training data samples, and call `g()` to get an iterator of eval
data samples. | def data_streams(dataset_name,
data_dir=None,
preprocess_fn=no_preprocess,
bare_preprocess_fn=None,
shuffle_buffer_size=1024,
eval_holdout_size=0,
input_name=None,
target_name=None):
"""Creates `(train, eval)` data sources from ``dataset_name``.
Args:
dataset_name: Name of dataset belonging to TFDS or T2T. T2T dataset names
must start with ``'t2t_'``.
data_dir: Directory where the data is located.
preprocess_fn: Function to use for pre-processing after appending targets to
inputs.
bare_preprocess_fn: Function to use for pre-processing before appending
targets to inputs.
shuffle_buffer_size: Size of the shuffle buffer.
eval_holdout_size: If greater than 0, specifies a fraction of training data
to siphon off and use as eval data, in place of a separate eval split.
input_name: Name of the inputs from the dictionary.
target_name: Name of the outputs either from the dictionary or as a result
of post-processing.
Returns:
A pair of functions, `(f, g)` for use as data sources; call `f()` to get an
iterator of training data samples, and call `g()` to get an iterator of eval
data samples.
"""
data_dir = download_and_prepare(dataset_name, data_dir)
cache = []
def stream(which):
"""Create the stream, cache TF streams if needed."""
if not cache:
cache.append(
_train_and_eval_streams(dataset_name, data_dir, preprocess_fn,
bare_preprocess_fn, shuffle_buffer_size,
eval_holdout_size, input_name, target_name))
(train_ds, eval_ds, input_name_c) = cache[0]
dataset = eval_ds if which == 'eval' else train_ds
return dataset_to_stream(dataset, input_name_c)
train_stream = lambda: stream('train')
eval_stream = lambda: stream('eval')
return train_stream, eval_stream |
Takes a tf.Dataset and creates a numpy stream of ready batches. | def dataset_to_stream(dataset, input_name):
"""Takes a tf.Dataset and creates a numpy stream of ready batches."""
# All input-pipeline processing should be on CPU.
for example in fastmath.dataset_as_numpy(dataset):
features = example[0]
inp, out = features[input_name], example[1]
mask = features['mask'] if 'mask' in features else None
# Some accelerators don't handle uint8 well, cast to int.
if isinstance(inp, np.uint8):
inp = inp.astype(np.int32)
if isinstance(out, np.uint8):
out = out.astype(np.int32)
yield (inp, out) if mask is None else (inp, out, mask) |
Return train and eval batches with input name and shape. | def _train_and_eval_streams(dataset, data_dir, preprocess_fn,
bare_preprocess_fn, shuffle_buffer_size,
eval_holdout_size, input_name, target_name):
"""Return train and eval batches with input name and shape."""
(train_data, eval_data,
keys) = _train_and_eval_dataset(dataset, data_dir, eval_holdout_size)
# If provided select input_name/target_name else fall back to keys if that is
# available, else [None].
input_names = ([input_name] if input_name is not None else
keys[0] if keys is not None else [None])
target_names = ([target_name] if target_name is not None else
keys[1] if keys is not None else [None])
train_batches = _shuffle_data(train_data, target_names, True,
shuffle_buffer_size, preprocess_fn,
bare_preprocess_fn)
eval_batches = _shuffle_data(eval_data, target_names, False,
shuffle_buffer_size, preprocess_fn,
bare_preprocess_fn)
return (train_batches, eval_batches, input_names[0]) |
Shuffle the given dataset and run pre-processing. | def _shuffle_data(dataset, target_names, training, shuffle_buffer_size,
preprocess_fn, bare_preprocess_fn):
"""Shuffle the given dataset and run pre-processing."""
def append_targets(example):
"""Append targets to the example dictionary. Needed for Keras."""
if len(target_names) == 1:
return (example, example[target_names[0]])
targets = {}
for name in target_names:
targets[name] = example[name]
return (example, targets)
# `bare_preprocess_fn` is called before appending targets etc.
if bare_preprocess_fn is not None:
dataset = bare_preprocess_fn(dataset, training)
dataset = dataset.map(append_targets)
# TODO(pkozakowski): Repeat both the training and evaluation set, so we don't
# have incomplete batches during evaluation. This will be a problem when we
# add an option to evaluate on the whole dataset, then we'll need to think of
# a different solution.
dataset = dataset.repeat()
if training:
# Skip a random fraction at the beginning of the stream. The skip is
# essential for synchronous highly-parallel training to avoid multiple
# replicas reading the same data in lock-step.
dataset = dataset.skip(random.randint(0, _MAX_SKIP_EXAMPLES))
dataset = preprocess_fn(dataset, training)
dataset = dataset.shuffle(shuffle_buffer_size)
return dataset.prefetch(8) |