python_code stringlengths 0-1.02M | repo_name stringlengths 9-48 | file_path stringlengths 5-114 |
---|---|---|
import tensorboard
from distutils.version import LooseVersion
if not hasattr(tensorboard, "__version__") or LooseVersion(
tensorboard.__version__
) < LooseVersion("1.15"):
raise ImportError("TensorBoard logging requires TensorBoard version 1.15 or above")
del LooseVersion
del tensorboard
from .writer import FileWriter, SummaryWriter # noqa: F401
from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401
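# Hedged usage note (editor's addition, not part of the upstream file): once the
# version check above passes, the imports above expose the public entry points.
# A minimal, illustrative session looks like this; the directory name is made up.
#
#     from torch.utils.tensorboard import SummaryWriter
#     writer = SummaryWriter("runs/example")
#     writer.add_scalar("loss", 0.25, global_step=0)
#     writer.close()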
| pytorch-master | torch/utils/tensorboard/__init__.py |
import math
import numpy as np
from ._convert_np import make_np
from ._utils import make_grid
from tensorboard.compat import tf
from tensorboard.plugins.projector.projector_config_pb2 import EmbeddingInfo
def make_tsv(metadata, save_path, metadata_header=None):
if not metadata_header:
metadata = [str(x) for x in metadata]
else:
assert len(metadata_header) == len(
metadata[0]
), "len of header must be equal to the number of columns in metadata"
metadata = ["\t".join(str(e) for e in l) for l in [metadata_header] + metadata]
metadata_bytes = tf.compat.as_bytes("\n".join(metadata) + "\n")
fs = tf.io.gfile.get_filesystem(save_path)
fs.write(fs.join(save_path, "metadata.tsv"), metadata_bytes, binary_mode=True)
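# Hedged example (editor's addition): a sketch of the two TSV layouts make_tsv()
# accepts. The labels and save_path are invented; the directory is assumed to
# already exist, and the second call overwrites the metadata.tsv of the first.
def _example_make_tsv(save_path="/tmp/embedding_demo"):
    # One column: every entry becomes its own line.
    make_tsv(["cat", "dog", "bird"], save_path)
    # Multiple columns: each entry must match the header length.
    make_tsv([[0, "cat"], [1, "dog"]], save_path, metadata_header=["idx", "label"])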
# https://github.com/tensorflow/tensorboard/issues/44 image label will be squared
def make_sprite(label_img, save_path):
from PIL import Image
from io import BytesIO
# this ensures the sprite image has correct dimension as described in
# https://www.tensorflow.org/get_started/embedding_viz
nrow = int(math.ceil((label_img.size(0)) ** 0.5))
arranged_img_CHW = make_grid(make_np(label_img), ncols=nrow)
# augment images so that #images equals nrow*nrow
arranged_augment_square_HWC = np.zeros(
(arranged_img_CHW.shape[2], arranged_img_CHW.shape[2], 3)
)
arranged_img_HWC = arranged_img_CHW.transpose(1, 2, 0) # chw -> hwc
arranged_augment_square_HWC[: arranged_img_HWC.shape[0], :, :] = arranged_img_HWC
im = Image.fromarray(np.uint8((arranged_augment_square_HWC * 255).clip(0, 255)))
with BytesIO() as buf:
im.save(buf, format="PNG")
im_bytes = buf.getvalue()
fs = tf.io.gfile.get_filesystem(save_path)
fs.write(fs.join(save_path, "sprite.png"), im_bytes, binary_mode=True)
def get_embedding_info(metadata, label_img, filesys, subdir, global_step, tag):
info = EmbeddingInfo()
info.tensor_name = "{}:{}".format(tag, str(global_step).zfill(5))
info.tensor_path = filesys.join(subdir, "tensors.tsv")
if metadata is not None:
info.metadata_path = filesys.join(subdir, "metadata.tsv")
if label_img is not None:
info.sprite.image_path = filesys.join(subdir, "sprite.png")
info.sprite.single_image_dim.extend([label_img.size(3), label_img.size(2)])
return info
def write_pbtxt(save_path, contents):
fs = tf.io.gfile.get_filesystem(save_path)
config_path = fs.join(save_path, "projector_config.pbtxt")
fs.write(config_path, tf.compat.as_bytes(contents), binary_mode=True)
def make_mat(matlist, save_path):
fs = tf.io.gfile.get_filesystem(save_path)
with tf.io.gfile.GFile(fs.join(save_path, "tensors.tsv"), "wb") as f:
for x in matlist:
x = [str(i.item()) for i in x]
f.write(tf.compat.as_bytes("\t".join(x) + "\n"))
| pytorch-master | torch/utils/tensorboard/_embedding.py |
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.versions_pb2 import VersionDef
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
def load_onnx_graph(fname):
import onnx
m = onnx.load(fname)
g = m.graph
return parse(g)
def parse(graph):
nodes_proto = []
nodes = []
import itertools
for node in itertools.chain(graph.input, graph.output):
nodes_proto.append(node)
for node in nodes_proto:
print(node.name)
shapeproto = TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=d.dim_value)
for d in node.type.tensor_type.shape.dim
]
)
nodes.append(
NodeDef(
name=node.name.encode(encoding="utf_8"),
op="Variable",
input=[],
attr={
"dtype": AttrValue(type=node.type.tensor_type.elem_type),
"shape": AttrValue(shape=shapeproto),
},
)
)
for node in graph.node:
_attr = []
for s in node.attribute:
_attr.append(" = ".join([str(f[1]) for f in s.ListFields()]))
attr = ", ".join(_attr).encode(encoding="utf_8")
print(node.output[0])
nodes.append(
NodeDef(
name=node.output[0].encode(encoding="utf_8"),
op=node.op_type,
input=node.input,
attr={"parameters": AttrValue(s=attr)},
)
)
# two pass token replacement, appends opname to object id
mapping = {}
for node in nodes:
mapping[node.name] = node.op + "_" + node.name
return GraphDef(node=nodes, versions=VersionDef(producer=22))
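# Hedged example (editor's addition): a sketch of converting an exported ONNX
# model into the GraphDef consumed by TensorBoard's graph plugin. The file name
# is hypothetical; it only has to point at a valid ONNX model on disk.
def _example_load_onnx_graph(fname="model.onnx"):
    graph_def = load_onnx_graph(fname)
    # Inputs/outputs become "Variable" nodes; every ONNX node keeps its
    # attributes serialized into a single "parameters" string attribute.
    return graph_def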
| pytorch-master | torch/utils/tensorboard/_onnx_graph.py |
import json
import logging
import os
from typing import Optional
import numpy as np
from google.protobuf import struct_pb2
# pylint: disable=unused-import
from six.moves import range
from tensorboard.compat.proto.summary_pb2 import HistogramProto
from tensorboard.compat.proto.summary_pb2 import Summary
from tensorboard.compat.proto.summary_pb2 import SummaryMetadata
from tensorboard.compat.proto.tensor_pb2 import TensorProto
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from tensorboard.plugins.custom_scalar import layout_pb2
from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData
from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData
from ._convert_np import make_np
from ._utils import _prepare_video, convert_to_HWC
__all__ = ['hparams', 'scalar', 'histogram_raw', 'histogram', 'make_histogram', 'image', 'image_boxes', 'draw_boxes',
'make_image', 'video', 'make_video', 'audio', 'custom_scalars', 'text', 'pr_curve_raw', 'pr_curve', 'compute_curve',
'mesh']
logger = logging.getLogger(__name__)
def _calc_scale_factor(tensor):
converted = tensor.numpy() if not isinstance(tensor, np.ndarray) else tensor
return 1 if converted.dtype == np.uint8 else 255
def _draw_single_box(
image,
xmin,
ymin,
xmax,
ymax,
display_str,
color="black",
color_text="black",
thickness=2,
):
from PIL import ImageDraw, ImageFont
font = ImageFont.load_default()
draw = ImageDraw.Draw(image)
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line(
[(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
width=thickness,
fill=color,
)
if display_str:
text_bottom = bottom
# Reverse list and print from bottom to top.
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[
(left, text_bottom - text_height - 2 * margin),
(left + text_width, text_bottom),
],
fill=color,
)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill=color_text,
font=font,
)
return image
def hparams(hparam_dict=None, metric_dict=None, hparam_domain_discrete=None):
"""Outputs three `Summary` protocol buffers needed by hparams plugin.
`Experiment` keeps the metadata of an experiment, such as the name of the
hyperparameters and the name of the metrics.
`SessionStartInfo` keeps key-value pairs of the hyperparameters
`SessionEndInfo` describes status of the experiment e.g. STATUS_SUCCESS
Args:
hparam_dict: A dictionary that contains names of the hyperparameters
and their values.
metric_dict: A dictionary that contains names of the metrics
and their values.
hparam_domain_discrete: (Optional[Dict[str, List[Any]]]) A dictionary that
contains names of the hyperparameters and all discrete values they can hold
Returns:
The `Summary` protobufs for Experiment, SessionStartInfo and
SessionEndInfo
"""
import torch
from six import string_types
from tensorboard.plugins.hparams.api_pb2 import (
Experiment,
HParamInfo,
MetricInfo,
MetricName,
Status,
DataType,
)
from tensorboard.plugins.hparams.metadata import (
PLUGIN_NAME,
PLUGIN_DATA_VERSION,
EXPERIMENT_TAG,
SESSION_START_INFO_TAG,
SESSION_END_INFO_TAG,
)
from tensorboard.plugins.hparams.plugin_data_pb2 import (
HParamsPluginData,
SessionEndInfo,
SessionStartInfo,
)
# TODO: expose other parameters in the future.
# hp = HParamInfo(name='lr',display_name='learning rate',
# type=DataType.DATA_TYPE_FLOAT64, domain_interval=Interval(min_value=10,
# max_value=100))
# mt = MetricInfo(name=MetricName(tag='accuracy'), display_name='accuracy',
# description='', dataset_type=DatasetType.DATASET_VALIDATION)
# exp = Experiment(name='123', description='456', time_created_secs=100.0,
# hparam_infos=[hp], metric_infos=[mt], user='tw')
if not isinstance(hparam_dict, dict):
logger.warning("parameter: hparam_dict should be a dictionary, nothing logged.")
raise TypeError(
"parameter: hparam_dict should be a dictionary, nothing logged."
)
if not isinstance(metric_dict, dict):
logger.warning("parameter: metric_dict should be a dictionary, nothing logged.")
raise TypeError(
"parameter: metric_dict should be a dictionary, nothing logged."
)
hparam_domain_discrete = hparam_domain_discrete or {}
if not isinstance(hparam_domain_discrete, dict):
raise TypeError(
"parameter: hparam_domain_discrete should be a dictionary, nothing logged."
)
for k, v in hparam_domain_discrete.items():
if (
k not in hparam_dict
or not isinstance(v, list)
or not all(isinstance(d, type(hparam_dict[k])) for d in v)
):
raise TypeError(
"parameter: hparam_domain_discrete[{}] should be a list of same type as "
"hparam_dict[{}].".format(k, k)
)
hps = []
ssi = SessionStartInfo()
for k, v in hparam_dict.items():
if v is None:
continue
if isinstance(v, int) or isinstance(v, float):
ssi.hparams[k].number_value = v
if k in hparam_domain_discrete:
domain_discrete: Optional[struct_pb2.ListValue] = struct_pb2.ListValue(
values=[
struct_pb2.Value(number_value=d)
for d in hparam_domain_discrete[k]
]
)
else:
domain_discrete = None
hps.append(
HParamInfo(
name=k,
type=DataType.Value("DATA_TYPE_FLOAT64"),
domain_discrete=domain_discrete,
)
)
continue
if isinstance(v, string_types):
ssi.hparams[k].string_value = v
if k in hparam_domain_discrete:
domain_discrete = struct_pb2.ListValue(
values=[
struct_pb2.Value(string_value=d)
for d in hparam_domain_discrete[k]
]
)
else:
domain_discrete = None
hps.append(
HParamInfo(
name=k,
type=DataType.Value("DATA_TYPE_STRING"),
domain_discrete=domain_discrete,
)
)
continue
if isinstance(v, bool):
ssi.hparams[k].bool_value = v
if k in hparam_domain_discrete:
domain_discrete = struct_pb2.ListValue(
values=[
struct_pb2.Value(bool_value=d)
for d in hparam_domain_discrete[k]
]
)
else:
domain_discrete = None
hps.append(
HParamInfo(
name=k,
type=DataType.Value("DATA_TYPE_BOOL"),
domain_discrete=domain_discrete,
)
)
continue
if isinstance(v, torch.Tensor):
v = make_np(v)[0]
ssi.hparams[k].number_value = v
hps.append(HParamInfo(name=k, type=DataType.Value("DATA_TYPE_FLOAT64")))
continue
raise ValueError(
"value should be one of int, float, str, bool, or torch.Tensor"
)
content = HParamsPluginData(session_start_info=ssi, version=PLUGIN_DATA_VERSION)
smd = SummaryMetadata(
plugin_data=SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME, content=content.SerializeToString()
)
)
ssi = Summary(value=[Summary.Value(tag=SESSION_START_INFO_TAG, metadata=smd)])
mts = [MetricInfo(name=MetricName(tag=k)) for k in metric_dict.keys()]
exp = Experiment(hparam_infos=hps, metric_infos=mts)
content = HParamsPluginData(experiment=exp, version=PLUGIN_DATA_VERSION)
smd = SummaryMetadata(
plugin_data=SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME, content=content.SerializeToString()
)
)
exp = Summary(value=[Summary.Value(tag=EXPERIMENT_TAG, metadata=smd)])
sei = SessionEndInfo(status=Status.Value("STATUS_SUCCESS"))
content = HParamsPluginData(session_end_info=sei, version=PLUGIN_DATA_VERSION)
smd = SummaryMetadata(
plugin_data=SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME, content=content.SerializeToString()
)
)
sei = Summary(value=[Summary.Value(tag=SESSION_END_INFO_TAG, metadata=smd)])
return exp, ssi, sei
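# Hedged example (editor's addition): a minimal sketch of calling hparams()
# directly. The dictionaries are invented; in practice SummaryWriter.add_hparams()
# builds them for you and writes all three returned summaries.
def _example_hparams():
    exp, ssi, sei = hparams(
        hparam_dict={"lr": 0.01, "optimizer": "adam"},
        metric_dict={"hparam/accuracy": 0.0},
        hparam_domain_discrete={"optimizer": ["adam", "sgd"]},
    )
    # exp lists the hyperparameter/metric names, ssi carries this run's values,
    # and sei marks the session as STATUS_SUCCESS.
    return exp, ssi, sei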
def scalar(name, tensor, collections=None, new_style=False, double_precision=False):
"""Outputs a `Summary` protocol buffer containing a single scalar value.
The generated Summary has a Tensor.proto containing the input Tensor.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric Tensor containing a single value.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
new_style: Whether to use new style (tensor field) or old style (simple_value
field). New style could lead to faster data loading.
Returns:
A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf.
Raises:
ValueError: If tensor has the wrong shape or type.
"""
tensor = make_np(tensor).squeeze()
assert (
tensor.ndim == 0
), f"Tensor should contain one element (0 dimensions). Was given size: {tensor.size} and {tensor.ndim} dimensions."
# python float is double precision in numpy
scalar = float(tensor)
if new_style:
tensor_proto = TensorProto(float_val=[scalar], dtype="DT_FLOAT")
if double_precision:
tensor_proto = TensorProto(double_val=[scalar], dtype="DT_DOUBLE")
plugin_data = SummaryMetadata.PluginData(plugin_name="scalars")
smd = SummaryMetadata(plugin_data=plugin_data)
return Summary(
value=[
Summary.Value(
tag=name,
tensor=tensor_proto,
metadata=smd,
)
]
)
else:
return Summary(value=[Summary.Value(tag=name, simple_value=scalar)])
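# Hedged example (editor's addition): the same value encoded both ways scalar()
# supports. The old style fills simple_value; the new style stores a TensorProto,
# which the docstring above notes can load faster.
def _example_scalar():
    old_style = scalar("loss", 0.25)
    new_style = scalar("loss", 0.25, new_style=True, double_precision=True)
    return old_style, new_style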
def histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_counts):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
min: A float or int min value
max: A float or int max value
num: Int number of values
sum: Float or int sum of all values
sum_squares: Float or int sum of squares for all values
bucket_limits: A numeric `Tensor` with upper value per bucket
bucket_counts: A numeric `Tensor` with number of values per bucket
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
hist = HistogramProto(
min=min,
max=max,
num=num,
sum=sum,
sum_squares=sum_squares,
bucket_limit=bucket_limits,
bucket=bucket_counts,
)
return Summary(value=[Summary.Value(tag=name, histo=hist)])
def histogram(name, values, bins, max_bins=None):
# pylint: disable=line-too-long
"""Outputs a `Summary` protocol buffer with a histogram.
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.
This op reports an `InvalidArgument` error if any value is not finite.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
values = make_np(values)
hist = make_histogram(values.astype(float), bins, max_bins)
return Summary(value=[Summary.Value(tag=name, histo=hist)])
def make_histogram(values, bins, max_bins=None):
"""Convert values into a histogram proto using logic from histogram.cc."""
if values.size == 0:
raise ValueError("The input has no element.")
values = values.reshape(-1)
counts, limits = np.histogram(values, bins=bins)
num_bins = len(counts)
if max_bins is not None and num_bins > max_bins:
subsampling = num_bins // max_bins
subsampling_remainder = num_bins % subsampling
if subsampling_remainder != 0:
counts = np.pad(
counts,
pad_width=[[0, subsampling - subsampling_remainder]],
mode="constant",
constant_values=0,
)
counts = counts.reshape(-1, subsampling).sum(axis=-1)
new_limits = np.empty((counts.size + 1,), limits.dtype)
new_limits[:-1] = limits[:-1:subsampling]
new_limits[-1] = limits[-1]
limits = new_limits
# Find the first and the last bin defining the support of the histogram:
cum_counts = np.cumsum(np.greater(counts, 0, dtype=np.int32))
start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side="right")
start = int(start)
end = int(end) + 1
del cum_counts
# TensorBoard only includes the right bin limits. To still have the leftmost limit
# included, we include an empty bin left.
# If start == 0, we need to add an empty one left, otherwise we can just include the bin left to the
# first nonzero-count bin:
counts = (
counts[start - 1 : end] if start > 0 else np.concatenate([[0], counts[:end]])
)
limits = limits[start : end + 1]
if counts.size == 0 or limits.size == 0:
raise ValueError("The histogram is empty, please file a bug report.")
sum_sq = values.dot(values)
return HistogramProto(
min=values.min(),
max=values.max(),
num=len(values),
sum=values.sum(),
sum_squares=sum_sq,
bucket_limit=limits.tolist(),
bucket=counts.tolist(),
)
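# Hedged example (editor's addition): make_histogram() with max_bins set, which
# exercises the subsampling path above (pad the counts to a multiple, then merge
# neighbouring buckets). The data is random and purely illustrative.
def _example_make_histogram():
    import numpy as np
    values = np.random.randn(1000)
    proto = make_histogram(values.astype(float), bins=64, max_bins=10)
    # proto.bucket / proto.bucket_limit now describe roughly max_bins merged
    # buckets, possibly with an extra empty bucket kept for the leftmost limit.
    return proto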
def image(tag, tensor, rescale=1, dataformats="NCHW"):
"""Outputs a `Summary` protocol buffer with images.
The summary has up to `max_images` summary values containing images. The
images are built from `tensor` which must be 3-D with shape `[height, width,
channels]` and where `channels` can be:
* 1: `tensor` is interpreted as Grayscale.
* 3: `tensor` is interpreted as RGB.
* 4: `tensor` is interpreted as RGBA.
The `name` in the outputted Summary.Value protobufs is generated based on the
name, with a suffix depending on the max_outputs setting:
* If `max_outputs` is 1, the summary value tag is '*name*/image'.
* If `max_outputs` is greater than 1, the summary value tags are
generated sequentially as '*name*/image/0', '*name*/image/1', etc.
Args:
tag: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width,
channels]` where `channels` is 1, 3, or 4.
'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8).
The image() function will scale the image values to [0, 255] by applying
a scale factor of either 1 (uint8) or 255 (float32).
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
tensor = make_np(tensor)
tensor = convert_to_HWC(tensor, dataformats)
# Do not assume that user passes in values in [0, 255], use data type to detect
scale_factor = _calc_scale_factor(tensor)
tensor = tensor.astype(np.float32)
tensor = (tensor * scale_factor).astype(np.uint8)
image = make_image(tensor, rescale=rescale)
return Summary(value=[Summary.Value(tag=tag, image=image)])
def image_boxes(
tag, tensor_image, tensor_boxes, rescale=1, dataformats="CHW", labels=None
):
"""Outputs a `Summary` protocol buffer with images."""
tensor_image = make_np(tensor_image)
tensor_image = convert_to_HWC(tensor_image, dataformats)
tensor_boxes = make_np(tensor_boxes)
tensor_image = tensor_image.astype(np.float32) * _calc_scale_factor(tensor_image)
image = make_image(
tensor_image.astype(np.uint8), rescale=rescale, rois=tensor_boxes, labels=labels
)
return Summary(value=[Summary.Value(tag=tag, image=image)])
def draw_boxes(disp_image, boxes, labels=None):
# xyxy format
num_boxes = boxes.shape[0]
list_gt = range(num_boxes)
for i in list_gt:
disp_image = _draw_single_box(
disp_image,
boxes[i, 0],
boxes[i, 1],
boxes[i, 2],
boxes[i, 3],
display_str=None if labels is None else labels[i],
color="Red",
)
return disp_image
def make_image(tensor, rescale=1, rois=None, labels=None):
"""Convert a numpy representation of an image to Image protobuf"""
from PIL import Image
height, width, channel = tensor.shape
scaled_height = int(height * rescale)
scaled_width = int(width * rescale)
image = Image.fromarray(tensor)
if rois is not None:
image = draw_boxes(image, rois, labels=labels)
image = image.resize((scaled_width, scaled_height), Image.ANTIALIAS)
import io
output = io.BytesIO()
image.save(output, format="PNG")
image_string = output.getvalue()
output.close()
return Summary.Image(
height=height,
width=width,
colorspace=channel,
encoded_image_string=image_string,
)
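# Hedged example (editor's addition): a tiny RGB array pushed through make_image()
# with a single bounding box, mirroring what image_boxes() does internally. The
# pixel data and box are invented, and the code above relies on older pillow APIs
# (Image.ANTIALIAS, ImageFont.getsize), so a recent pillow may reject it.
def _example_make_image():
    import numpy as np
    rgb = np.zeros((64, 64, 3), dtype=np.uint8)
    rgb[16:48, 16:48] = 255  # white square on black background
    rois = np.array([[10.0, 10.0, 54.0, 54.0]])  # one box: xmin, ymin, xmax, ymax
    return make_image(rgb, rescale=1, rois=rois, labels=["box"])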
def video(tag, tensor, fps=4):
tensor = make_np(tensor)
tensor = _prepare_video(tensor)
# If user passes in uint8, then we don't need to rescale by 255
scale_factor = _calc_scale_factor(tensor)
tensor = tensor.astype(np.float32)
tensor = (tensor * scale_factor).astype(np.uint8)
video = make_video(tensor, fps)
return Summary(value=[Summary.Value(tag=tag, image=video)])
def make_video(tensor, fps):
try:
import moviepy # noqa: F401
except ImportError:
print("add_video needs package moviepy")
return
try:
from moviepy import editor as mpy
except ImportError:
print(
"moviepy is installed, but can't import moviepy.editor.",
"Some packages could be missing [imageio, requests]",
)
return
import tempfile
t, h, w, c = tensor.shape
# encode sequence of images into gif string
clip = mpy.ImageSequenceClip(list(tensor), fps=fps)
filename = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name
try: # newer version of moviepy use logger instead of progress_bar argument.
clip.write_gif(filename, verbose=False, logger=None)
except TypeError:
try: # older version of moviepy does not support progress_bar argument.
clip.write_gif(filename, verbose=False, progress_bar=False)
except TypeError:
clip.write_gif(filename, verbose=False)
with open(filename, "rb") as f:
tensor_string = f.read()
try:
os.remove(filename)
except OSError:
logger.warning("The temporary file used by moviepy cannot be deleted.")
return Summary.Image(
height=h, width=w, colorspace=c, encoded_image_string=tensor_string
)
def audio(tag, tensor, sample_rate=44100):
array = make_np(tensor)
array = array.squeeze()
if abs(array).max() > 1:
print("warning: audio amplitude out of range, auto clipped.")
array = array.clip(-1, 1)
assert array.ndim == 1, "input tensor should be 1 dimensional."
array = (array * np.iinfo(np.int16).max).astype("<i2")
import io
import wave
fio = io.BytesIO()
with wave.open(fio, "wb") as wave_write:
wave_write.setnchannels(1)
wave_write.setsampwidth(2)
wave_write.setframerate(sample_rate)
wave_write.writeframes(array.data)
audio_string = fio.getvalue()
fio.close()
audio = Summary.Audio(
sample_rate=sample_rate,
num_channels=1,
length_frames=array.shape[-1],
encoded_audio_string=audio_string,
content_type="audio/wav",
)
return Summary(value=[Summary.Value(tag=tag, audio=audio)])
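# Hedged example (editor's addition): one second of a 440 Hz tone encoded by
# audio() above. The amplitude stays within [-1, 1], so no clipping warning.
def _example_audio():
    import numpy as np
    sample_rate = 44100
    t = np.linspace(0.0, 1.0, sample_rate, endpoint=False)
    tone = 0.5 * np.sin(2.0 * np.pi * 440.0 * t)
    return audio("tone", tone, sample_rate=sample_rate)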
def custom_scalars(layout):
categories = []
for k, v in layout.items():
charts = []
for chart_name, chart_metadata in v.items():
tags = chart_metadata[1]
if chart_metadata[0] == "Margin":
assert len(tags) == 3
mgcc = layout_pb2.MarginChartContent(
series=[
layout_pb2.MarginChartContent.Series(
value=tags[0], lower=tags[1], upper=tags[2]
)
]
)
chart = layout_pb2.Chart(title=chart_name, margin=mgcc)
else:
mlcc = layout_pb2.MultilineChartContent(tag=tags)
chart = layout_pb2.Chart(title=chart_name, multiline=mlcc)
charts.append(chart)
categories.append(layout_pb2.Category(title=k, chart=charts))
layout = layout_pb2.Layout(category=categories)
plugin_data = SummaryMetadata.PluginData(plugin_name="custom_scalars")
smd = SummaryMetadata(plugin_data=plugin_data)
tensor = TensorProto(
dtype="DT_STRING",
string_val=[layout.SerializeToString()],
tensor_shape=TensorShapeProto(),
)
return Summary(
value=[
Summary.Value(tag="custom_scalars__config__", tensor=tensor, metadata=smd)
]
)
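# Hedged example (editor's addition): the layout dict format custom_scalars()
# expects, inferred from the parsing loop above. Keys are category titles; each
# chart maps to either ["Multiline", [tags...]] or ["Margin", [value, lower, upper]].
def _example_custom_scalars():
    layout = {
        "Losses": {
            "total": ["Multiline", ["loss/train", "loss/val"]],
        },
        "Accuracy": {
            "with margin": ["Margin", ["acc/mean", "acc/lower", "acc/upper"]],
        },
    }
    return custom_scalars(layout)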
def text(tag, text):
plugin_data = SummaryMetadata.PluginData(
plugin_name="text", content=TextPluginData(version=0).SerializeToString()
)
smd = SummaryMetadata(plugin_data=plugin_data)
tensor = TensorProto(
dtype="DT_STRING",
string_val=[text.encode(encoding="utf_8")],
tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]),
)
return Summary(
value=[Summary.Value(tag=tag + "/text_summary", metadata=smd, tensor=tensor)]
)
def pr_curve_raw(
tag, tp, fp, tn, fn, precision, recall, num_thresholds=127, weights=None
):
if num_thresholds > 127: # weird, value > 127 breaks protobuf
num_thresholds = 127
data = np.stack((tp, fp, tn, fn, precision, recall))
pr_curve_plugin_data = PrCurvePluginData(
version=0, num_thresholds=num_thresholds
).SerializeToString()
plugin_data = SummaryMetadata.PluginData(
plugin_name="pr_curves", content=pr_curve_plugin_data
)
smd = SummaryMetadata(plugin_data=plugin_data)
tensor = TensorProto(
dtype="DT_FLOAT",
float_val=data.reshape(-1).tolist(),
tensor_shape=TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=data.shape[0]),
TensorShapeProto.Dim(size=data.shape[1]),
]
),
)
return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
def pr_curve(tag, labels, predictions, num_thresholds=127, weights=None):
# weird, value > 127 breaks protobuf
num_thresholds = min(num_thresholds, 127)
data = compute_curve(
labels, predictions, num_thresholds=num_thresholds, weights=weights
)
pr_curve_plugin_data = PrCurvePluginData(
version=0, num_thresholds=num_thresholds
).SerializeToString()
plugin_data = SummaryMetadata.PluginData(
plugin_name="pr_curves", content=pr_curve_plugin_data
)
smd = SummaryMetadata(plugin_data=plugin_data)
tensor = TensorProto(
dtype="DT_FLOAT",
float_val=data.reshape(-1).tolist(),
tensor_shape=TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=data.shape[0]),
TensorShapeProto.Dim(size=data.shape[1]),
]
),
)
return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py
def compute_curve(labels, predictions, num_thresholds=None, weights=None):
_MINIMUM_COUNT = 1e-7
if weights is None:
weights = 1.0
# Compute bins of true positives and false positives.
bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
float_labels = labels.astype(np.float64)
histogram_range = (0, num_thresholds - 1)
tp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=float_labels * weights,
)
fp_buckets, _ = np.histogram(
bucket_indices,
bins=num_thresholds,
range=histogram_range,
weights=(1.0 - float_labels) * weights,
)
# Obtain the reverse cumulative sum.
tp = np.cumsum(tp_buckets[::-1])[::-1]
fp = np.cumsum(fp_buckets[::-1])[::-1]
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
return np.stack((tp, fp, tn, fn, precision, recall))
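# Hedged example (editor's addition): compute_curve() on a handful of made-up
# predictions. Each row of the result corresponds to one quantity, in the order
# stacked above: tp, fp, tn, fn, precision, recall.
def _example_compute_curve():
    import numpy as np
    labels = np.array([0, 1, 1, 0, 1, 0])
    predictions = np.array([0.1, 0.9, 0.6, 0.4, 0.8, 0.3])
    return compute_curve(labels, predictions, num_thresholds=5)  # shape (6, 5)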
def _get_tensor_summary(
name, display_name, description, tensor, content_type, components, json_config
):
"""Creates a tensor summary with summary metadata.
Args:
name: Uniquely identifiable name of the summary op. Could be replaced by
combination of name and type to make it unique even outside of this
summary.
display_name: Will be used as the display name in TensorBoard.
Defaults to `name`.
description: A longform readable description of the summary data. Markdown
is supported.
tensor: Tensor to display in summary.
content_type: Type of content inside the Tensor.
components: Bitmask representing present parts (vertices, colors, etc.) that
belong to the summary.
json_config: A string, JSON-serialized dictionary of ThreeJS classes
configuration.
Returns:
Tensor summary with metadata.
"""
import torch
from tensorboard.plugins.mesh import metadata
tensor = torch.as_tensor(tensor)
tensor_metadata = metadata.create_summary_metadata(
name,
display_name,
content_type,
components,
tensor.shape,
description,
json_config=json_config,
)
tensor = TensorProto(
dtype="DT_FLOAT",
float_val=tensor.reshape(-1).tolist(),
tensor_shape=TensorShapeProto(
dim=[
TensorShapeProto.Dim(size=tensor.shape[0]),
TensorShapeProto.Dim(size=tensor.shape[1]),
TensorShapeProto.Dim(size=tensor.shape[2]),
]
),
)
tensor_summary = Summary.Value(
tag=metadata.get_instance_name(name, content_type),
tensor=tensor,
metadata=tensor_metadata,
)
return tensor_summary
def _get_json_config(config_dict):
"""Parses and returns JSON string from python dictionary."""
json_config = "{}"
if config_dict is not None:
json_config = json.dumps(config_dict, sort_keys=True)
return json_config
# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/mesh/summary.py
def mesh(
tag, vertices, colors, faces, config_dict, display_name=None, description=None
):
"""Outputs a merged `Summary` protocol buffer with a mesh/point cloud.
Args:
tag: A name for this summary operation.
vertices: Tensor of shape `[dim_1, ..., dim_n, 3]` representing the 3D
coordinates of vertices.
faces: Tensor of shape `[dim_1, ..., dim_n, 3]` containing indices of
vertices within each triangle.
colors: Tensor of shape `[dim_1, ..., dim_n, 3]` containing colors for each
vertex.
display_name: If set, will be used as the display name in TensorBoard.
Defaults to `name`.
description: A longform readable description of the summary data. Markdown
is supported.
config_dict: Dictionary with ThreeJS classes names and configuration.
Returns:
Merged summary for mesh/point cloud representation.
"""
from tensorboard.plugins.mesh.plugin_data_pb2 import MeshPluginData
from tensorboard.plugins.mesh import metadata
json_config = _get_json_config(config_dict)
summaries = []
tensors = [
(vertices, MeshPluginData.VERTEX),
(faces, MeshPluginData.FACE),
(colors, MeshPluginData.COLOR),
]
tensors = [tensor for tensor in tensors if tensor[0] is not None]
components = metadata.get_components_bitmask(
[content_type for (tensor, content_type) in tensors]
)
for tensor, content_type in tensors:
summaries.append(
_get_tensor_summary(
tag,
display_name,
description,
tensor,
content_type,
components,
json_config,
)
)
return Summary(value=summaries)
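# Hedged example (editor's addition): a single triangle passed to mesh(). The
# leading batch dimension matters because _get_tensor_summary() reads exactly
# three shape entries; the coordinates, colors, and face indices are invented.
def _example_mesh():
    import numpy as np
    vertices = np.array([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])  # (1, 3, 3)
    colors = np.array([[[255, 0, 0], [0, 255, 0], [0, 0, 255]]])                # (1, 3, 3)
    faces = np.array([[[0, 1, 2]]])                                             # (1, 1, 3)
    return mesh("triangle", vertices, colors, faces, config_dict=None)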
| pytorch-master | torch/utils/tensorboard/summary.py |
"""
This module converts objects into numpy arrays.
"""
import numpy as np
import torch
def make_np(x):
"""
Args:
x: An instance of a torch tensor or a Caffe2 blob name
Returns:
numpy.array: Numpy array
"""
if isinstance(x, np.ndarray):
return x
if isinstance(x, str): # Caffe2 will pass name of blob(s) to fetch
return _prepare_caffe2(x)
if np.isscalar(x):
return np.array([x])
if isinstance(x, torch.Tensor):
return _prepare_pytorch(x)
raise NotImplementedError(
"Got {}, but numpy array, torch tensor, or caffe2 blob name are expected.".format(
type(x)
)
)
def _prepare_pytorch(x):
x = x.detach().cpu().numpy()
return x
def _prepare_caffe2(x):
from caffe2.python import workspace
x = workspace.FetchBlob(x)
return x
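# Hedged examples (editor's addition): the input kinds make_np() accepts. The
# Caffe2 blob-name path needs a populated workspace, so it is only sketched.
def _example_make_np():
    assert make_np(np.ones(3)).shape == (3,)           # ndarrays pass through
    assert make_np(3.14).shape == (1,)                 # scalars become 1-element arrays
    assert make_np(torch.zeros(2, 2)).shape == (2, 2)  # tensors are detached to numpy
    # make_np("blob_name") would call workspace.FetchBlob("blob_name") in Caffe2.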
| pytorch-master | torch/utils/tensorboard/_convert_np.py |
"""Provides an API for writing protocol buffers to event files to be
consumed by TensorBoard for visualization."""
import os
import time
import torch
from tensorboard.compat import tf
from tensorboard.compat.proto.event_pb2 import SessionLog
from tensorboard.compat.proto.event_pb2 import Event
from tensorboard.compat.proto import event_pb2
from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig
from tensorboard.summary.writer.event_file_writer import EventFileWriter
from ._convert_np import make_np
from ._embedding import (
make_mat,
make_sprite,
make_tsv,
write_pbtxt,
get_embedding_info,
)
from ._onnx_graph import load_onnx_graph
from ._pytorch_graph import graph
from ._utils import figure_to_image
from .summary import (
scalar,
histogram,
histogram_raw,
image,
audio,
text,
pr_curve,
pr_curve_raw,
video,
custom_scalars,
image_boxes,
mesh,
hparams,
)
__all__ = ['FileWriter', 'SummaryWriter']
class FileWriter(object):
"""Writes protocol buffers to event files to be consumed by TensorBoard.
The `FileWriter` class provides a mechanism to create an event file in a
given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
"""
def __init__(self, log_dir, max_queue=10, flush_secs=120, filename_suffix=""):
"""Creates a `FileWriter` and an event file.
On construction the writer creates a new event file in `log_dir`.
The other arguments to the constructor control the asynchronous writes to
the event file.
Args:
log_dir: A string. Directory where event file will be written.
max_queue: Integer. Size of the queue for pending events and
summaries before one of the 'add' calls forces a flush to disk.
Default is ten items.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk. Default is every two minutes.
filename_suffix: A string. Suffix added to all event filenames
in the log_dir directory. More details on filename construction in
tensorboard.summary.writer.event_file_writer.EventFileWriter.
"""
# Sometimes PosixPath is passed in and we need to coerce it to
# a string in all cases
# TODO: See if we can remove this in the future if we are
# actually the ones passing in a PosixPath
log_dir = str(log_dir)
self.event_writer = EventFileWriter(
log_dir, max_queue, flush_secs, filename_suffix
)
def get_logdir(self):
"""Returns the directory where event file will be written."""
return self.event_writer.get_logdir()
def add_event(self, event, step=None, walltime=None):
"""Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
step: Number. Optional global step value for training process
to record with the event.
walltime: float. Optional walltime to override the default (current)
walltime (from time.time()) seconds after epoch
"""
event.wall_time = time.time() if walltime is None else walltime
if step is not None:
# Make sure step is converted from numpy or other formats
# since protobuf might not convert depending on version
event.step = int(step)
self.event_writer.add_event(event)
def add_summary(self, summary, global_step=None, walltime=None):
"""Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer
and adds it to the event file.
Args:
summary: A `Summary` protocol buffer.
global_step: Number. Optional global step value for training process
to record with the summary.
walltime: float. Optional walltime to override the default (current)
walltime (from time.time()) seconds after epoch
"""
event = event_pb2.Event(summary=summary)
self.add_event(event, global_step, walltime)
def add_graph(self, graph_profile, walltime=None):
"""Adds a `Graph` and step stats protocol buffer to the event file.
Args:
graph_profile: A `Graph` and step stats protocol buffer.
walltime: float. Optional walltime to override the default (current)
walltime (from time.time()) seconds after epoch
"""
graph = graph_profile[0]
stepstats = graph_profile[1]
event = event_pb2.Event(graph_def=graph.SerializeToString())
self.add_event(event, None, walltime)
trm = event_pb2.TaggedRunMetadata(
tag="step1", run_metadata=stepstats.SerializeToString()
)
event = event_pb2.Event(tagged_run_metadata=trm)
self.add_event(event, None, walltime)
def add_onnx_graph(self, graph, walltime=None):
"""Adds a `Graph` protocol buffer to the event file.
Args:
graph: A `Graph` protocol buffer.
walltime: float. Optional walltime to override the default (current)
walltime (from time.time()) seconds after epoch
"""
event = event_pb2.Event(graph_def=graph.SerializeToString())
self.add_event(event, None, walltime)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self.event_writer.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self.event_writer.close()
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
self.event_writer.reopen()
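# Hedged example (editor's addition): FileWriter is the low-level path that
# SummaryWriter (below) builds on; a raw Summary proto can be appended directly.
# The log directory name is illustrative.
def _example_file_writer(log_dir="runs/raw_demo"):
    writer = FileWriter(log_dir)
    writer.add_summary(scalar("loss", 0.25), global_step=0)
    writer.flush()
    writer.close()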
class SummaryWriter(object):
"""Writes entries directly to event files in the log_dir to be
consumed by TensorBoard.
The `SummaryWriter` class provides a high-level API to create an event file
in a given directory and add summaries and events to it. The class updates the
file contents asynchronously. This allows a training program to call methods
to add data to the file directly from the training loop, without slowing down
training.
"""
def __init__(
self,
log_dir=None,
comment="",
purge_step=None,
max_queue=10,
flush_secs=120,
filename_suffix="",
):
"""Creates a `SummaryWriter` that will write out events and summaries
to the event file.
Args:
log_dir (str): Save directory location. Default is
runs/**CURRENT_DATETIME_HOSTNAME**, which changes after each run.
Use hierarchical folder structure to compare
between runs easily. e.g. pass in 'runs/exp1', 'runs/exp2', etc.
for each new experiment to compare across them.
comment (str): Comment log_dir suffix appended to the default
``log_dir``. If ``log_dir`` is assigned, this argument has no effect.
purge_step (int):
When logging crashes at step :math:`T+X` and restarts at step :math:`T`,
any events whose global_step is larger than or equal to :math:`T` will be
purged and hidden from TensorBoard.
Note that crashed and resumed experiments should have the same ``log_dir``.
max_queue (int): Size of the queue for pending events and
summaries before one of the 'add' calls forces a flush to disk.
Default is ten items.
flush_secs (int): How often, in seconds, to flush the
pending events and summaries to disk. Default is every two minutes.
filename_suffix (str): Suffix added to all event filenames in
the log_dir directory. More details on filename construction in
tensorboard.summary.writer.event_file_writer.EventFileWriter.
Examples::
from torch.utils.tensorboard import SummaryWriter
# create a summary writer with automatically generated folder name.
writer = SummaryWriter()
# folder location: runs/May04_22-14-54_s-MacBook-Pro.local/
# create a summary writer using the specified folder name.
writer = SummaryWriter("my_experiment")
# folder location: my_experiment
# create a summary writer with comment appended.
writer = SummaryWriter(comment="LR_0.1_BATCH_16")
# folder location: runs/May04_22-14-54_s-MacBook-Pro.localLR_0.1_BATCH_16/
"""
torch._C._log_api_usage_once("tensorboard.create.summarywriter")
if not log_dir:
import socket
from datetime import datetime
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
log_dir = os.path.join(
"runs", current_time + "_" + socket.gethostname() + comment
)
self.log_dir = log_dir
self.purge_step = purge_step
self.max_queue = max_queue
self.flush_secs = flush_secs
self.filename_suffix = filename_suffix
# Initialize the file writers, but they can be cleared out on close
# and recreated later as needed.
self.file_writer = self.all_writers = None
self._get_file_writer()
# Create default bins for histograms, see generate_testdata.py in tensorflow/tensorboard
v = 1e-12
buckets = []
neg_buckets = []
while v < 1e20:
buckets.append(v)
neg_buckets.append(-v)
v *= 1.1
self.default_bins = neg_buckets[::-1] + [0] + buckets
def _check_caffe2_blob(self, item):
"""
Caffe2 users have the option of passing a string representing the name of
a blob in the workspace instead of passing the actual Tensor/array containing
the numeric values. Thus, we need to check if we received a string as input
instead of an actual Tensor/array, and if so, we need to fetch the Blob
from the workspace corresponding to that name. Fetching can be done with the
following:
from caffe2.python import workspace (if not already imported)
workspace.FetchBlob(blob_name)
workspace.FetchBlobs([blob_name1, blob_name2, ...])
"""
return isinstance(item, str)
def _get_file_writer(self):
"""Returns the default FileWriter instance. Recreates it if closed."""
if self.all_writers is None or self.file_writer is None:
self.file_writer = FileWriter(
self.log_dir, self.max_queue, self.flush_secs, self.filename_suffix
)
self.all_writers = {self.file_writer.get_logdir(): self.file_writer}
if self.purge_step is not None:
most_recent_step = self.purge_step
self.file_writer.add_event(
Event(step=most_recent_step, file_version="brain.Event:2")
)
self.file_writer.add_event(
Event(
step=most_recent_step,
session_log=SessionLog(status=SessionLog.START),
)
)
self.purge_step = None
return self.file_writer
def get_logdir(self):
"""Returns the directory where event files will be written."""
return self.log_dir
def add_hparams(
self, hparam_dict, metric_dict, hparam_domain_discrete=None, run_name=None
):
"""Add a set of hyperparameters to be compared in TensorBoard.
Args:
hparam_dict (dict): Each key-value pair in the dictionary is the
name of the hyperparameter and its corresponding value.
The type of the value can be one of `bool`, `string`, `float`,
`int`, or `None`.
metric_dict (dict): Each key-value pair in the dictionary is the
name of the metric and its corresponding value. Note that the key used
here should be unique in the tensorboard record. Otherwise the value
you added by ``add_scalar`` will be displayed in the hparam plugin. In most
cases, this is unwanted.
hparam_domain_discrete: (Optional[Dict[str, List[Any]]]) A dictionary that
contains names of the hyperparameters and all discrete values they can hold
run_name (str): Name of the run, to be included as part of the logdir.
If unspecified, will use current timestamp.
Examples::
from torch.utils.tensorboard import SummaryWriter
with SummaryWriter() as w:
for i in range(5):
w.add_hparams({'lr': 0.1*i, 'bsize': i},
{'hparam/accuracy': 10*i, 'hparam/loss': 10*i})
Expected result:
.. image:: _static/img/tensorboard/add_hparam.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
if type(hparam_dict) is not dict or type(metric_dict) is not dict:
raise TypeError("hparam_dict and metric_dict should be dictionary.")
exp, ssi, sei = hparams(hparam_dict, metric_dict, hparam_domain_discrete)
if not run_name:
run_name = str(time.time())
logdir = os.path.join(self._get_file_writer().get_logdir(), run_name)
with SummaryWriter(log_dir=logdir) as w_hp:
w_hp.file_writer.add_summary(exp)
w_hp.file_writer.add_summary(ssi)
w_hp.file_writer.add_summary(sei)
for k, v in metric_dict.items():
w_hp.add_scalar(k, v)
def add_scalar(
self,
tag,
scalar_value,
global_step=None,
walltime=None,
new_style=False,
double_precision=False,
):
"""Add scalar data to summary.
Args:
tag (str): Data identifier
scalar_value (float or string/blobname): Value to save
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
with seconds after epoch of event
new_style (boolean): Whether to use new style (tensor field) or old
style (simple_value field). New style could lead to faster data loading.
Examples::
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
x = range(100)
for i in x:
writer.add_scalar('y=2x', i * 2, i)
writer.close()
Expected result:
.. image:: _static/img/tensorboard/add_scalar.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_scalar")
if self._check_caffe2_blob(scalar_value):
from caffe2.python import workspace
scalar_value = workspace.FetchBlob(scalar_value)
summary = scalar(
tag, scalar_value, new_style=new_style, double_precision=double_precision
)
self._get_file_writer().add_summary(summary, global_step, walltime)
def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, walltime=None):
"""Adds many scalar data to summary.
Args:
main_tag (str): The parent name for the tags
tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Examples::
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
r = 5
for i in range(100):
writer.add_scalars('run_14h', {'xsinx':i*np.sin(i/r),
'xcosx':i*np.cos(i/r),
'tanx': np.tan(i/r)}, i)
writer.close()
# This call adds three values to the same scalar plot with the tag
# 'run_14h' in TensorBoard's scalar section.
Expected result:
.. image:: _static/img/tensorboard/add_scalars.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_scalars")
walltime = time.time() if walltime is None else walltime
fw_logdir = self._get_file_writer().get_logdir()
for tag, scalar_value in tag_scalar_dict.items():
fw_tag = fw_logdir + "/" + main_tag.replace("/", "_") + "_" + tag
assert self.all_writers is not None
if fw_tag in self.all_writers.keys():
fw = self.all_writers[fw_tag]
else:
fw = FileWriter(
fw_tag, self.max_queue, self.flush_secs, self.filename_suffix
)
self.all_writers[fw_tag] = fw
if self._check_caffe2_blob(scalar_value):
from caffe2.python import workspace
scalar_value = workspace.FetchBlob(scalar_value)
fw.add_summary(scalar(main_tag, scalar_value), global_step, walltime)
def add_histogram(
self,
tag,
values,
global_step=None,
bins="tensorflow",
walltime=None,
max_bins=None,
):
"""Add histogram to summary.
Args:
tag (str): Data identifier
values (torch.Tensor, numpy.ndarray, or string/blobname): Values to build histogram
global_step (int): Global step value to record
bins (str): One of {'tensorflow','auto', 'fd', ...}. This determines how the bins are made. You can find
other options in: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Examples::
from torch.utils.tensorboard import SummaryWriter
import numpy as np
writer = SummaryWriter()
for i in range(10):
x = np.random.random(1000)
writer.add_histogram('distribution centers', x + i, i)
writer.close()
Expected result:
.. image:: _static/img/tensorboard/add_histogram.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_histogram")
if self._check_caffe2_blob(values):
from caffe2.python import workspace
values = workspace.FetchBlob(values)
if isinstance(bins, str) and bins == "tensorflow":
bins = self.default_bins
self._get_file_writer().add_summary(
histogram(tag, values, bins, max_bins=max_bins), global_step, walltime
)
def add_histogram_raw(
self,
tag,
min,
max,
num,
sum,
sum_squares,
bucket_limits,
bucket_counts,
global_step=None,
walltime=None,
):
"""Adds histogram with raw data.
Args:
tag (str): Data identifier
min (float or int): Min value
max (float or int): Max value
num (int): Number of values
sum (float or int): Sum of all values
sum_squares (float or int): Sum of squares for all values
bucket_limits (torch.Tensor, numpy.ndarray): Upper value per bucket.
It should have the same number of elements as `bucket_counts`.
bucket_counts (torch.Tensor, numpy.ndarray): Number of values per bucket
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/histogram/README.md
Examples::
from torch.utils.tensorboard import SummaryWriter
import numpy as np
writer = SummaryWriter()
dummy_data = []
for idx, value in enumerate(range(50)):
dummy_data += [idx + 0.001] * value
bins = list(range(50+2))
bins = np.array(bins)
values = np.array(dummy_data).astype(float).reshape(-1)
counts, limits = np.histogram(values, bins=bins)
sum_sq = values.dot(values)
writer.add_histogram_raw(
tag='histogram_with_raw_data',
min=values.min(),
max=values.max(),
num=len(values),
sum=values.sum(),
sum_squares=sum_sq,
bucket_limits=limits[1:].tolist(),
bucket_counts=counts.tolist(),
global_step=0)
writer.close()
Expected result:
.. image:: _static/img/tensorboard/add_histogram_raw.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_histogram_raw")
if len(bucket_limits) != len(bucket_counts):
raise ValueError(
"len(bucket_limits) != len(bucket_counts), see the document."
)
self._get_file_writer().add_summary(
histogram_raw(
tag, min, max, num, sum, sum_squares, bucket_limits, bucket_counts
),
global_step,
walltime,
)
def add_image(
self, tag, img_tensor, global_step=None, walltime=None, dataformats="CHW"
):
"""Add image data to summary.
Note that this requires the ``pillow`` package.
Args:
tag (str): Data identifier
img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
dataformats (str): Image data format specification of the form
CHW, HWC, HW, WH, etc.
Shape:
img_tensor: Default is :math:`(3, H, W)`. You can use ``torchvision.utils.make_grid()`` to
convert a batch of tensors into 3xHxW format or call ``add_images`` and let us do the job.
Tensors of shape :math:`(1, H, W)`, :math:`(H, W)`, or :math:`(H, W, 3)` are also suitable as long as
the corresponding ``dataformats`` argument is passed, e.g. ``CHW``, ``HWC``, ``HW``.
Examples::
from torch.utils.tensorboard import SummaryWriter
import numpy as np
img = np.zeros((3, 100, 100))
img[0] = np.arange(0, 10000).reshape(100, 100) / 10000
img[1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000
img_HWC = np.zeros((100, 100, 3))
img_HWC[:, :, 0] = np.arange(0, 10000).reshape(100, 100) / 10000
img_HWC[:, :, 1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000
writer = SummaryWriter()
writer.add_image('my_image', img, 0)
# If you have non-default dimension setting, set the dataformats argument.
writer.add_image('my_image_HWC', img_HWC, 0, dataformats='HWC')
writer.close()
Expected result:
.. image:: _static/img/tensorboard/add_image.png
:scale: 50 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_image")
if self._check_caffe2_blob(img_tensor):
from caffe2.python import workspace
img_tensor = workspace.FetchBlob(img_tensor)
self._get_file_writer().add_summary(
image(tag, img_tensor, dataformats=dataformats), global_step, walltime
)
def add_images(
self, tag, img_tensor, global_step=None, walltime=None, dataformats="NCHW"
):
"""Add batched image data to summary.
Note that this requires the ``pillow`` package.
Args:
tag (str): Data identifier
img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
dataformats (str): Image data format specification of the form
NCHW, NHWC, CHW, HWC, HW, WH, etc.
Shape:
img_tensor: Default is :math:`(N, 3, H, W)`. If ``dataformats`` is specified, other shape will be
accepted. e.g. NCHW or NHWC.
Examples::
from torch.utils.tensorboard import SummaryWriter
import numpy as np
img_batch = np.zeros((16, 3, 100, 100))
for i in range(16):
img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i
img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100) / 10000) / 16 * i
writer = SummaryWriter()
writer.add_images('my_image_batch', img_batch, 0)
writer.close()
Expected result:
.. image:: _static/img/tensorboard/add_images.png
:scale: 30 %
"""
torch._C._log_api_usage_once("tensorboard.logging.add_images")
if self._check_caffe2_blob(img_tensor):
from caffe2.python import workspace
img_tensor = workspace.FetchBlob(img_tensor)
self._get_file_writer().add_summary(
image(tag, img_tensor, dataformats=dataformats), global_step, walltime
)
def add_image_with_boxes(
self,
tag,
img_tensor,
box_tensor,
global_step=None,
walltime=None,
rescale=1,
dataformats="CHW",
labels=None,
):
"""Add image and draw bounding boxes on the image.
Args:
tag (str): Data identifier
img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data
box_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Box data (for detected objects)
box should be represented as [x1, y1, x2, y2].
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
rescale (float): Optional scale override
dataformats (str): Image data format specification of the form
NCHW, NHWC, CHW, HWC, HW, WH, etc.
labels (list of string): The label to be shown for each bounding box.
Shape:
img_tensor: Default is :math:`(3, H, W)`. It can be specified with ``dataformats`` argument.
e.g. CHW or HWC
box_tensor: (torch.Tensor, numpy.ndarray, or string/blobname): Nx4, where N is the number of
boxes and the 4 elements in each row represent (xmin, ymin, xmax, ymax).
"""
torch._C._log_api_usage_once("tensorboard.logging.add_image_with_boxes")
if self._check_caffe2_blob(img_tensor):
from caffe2.python import workspace
img_tensor = workspace.FetchBlob(img_tensor)
if self._check_caffe2_blob(box_tensor):
from caffe2.python import workspace
box_tensor = workspace.FetchBlob(box_tensor)
if labels is not None:
if isinstance(labels, str):
labels = [labels]
if len(labels) != box_tensor.shape[0]:
labels = None
self._get_file_writer().add_summary(
image_boxes(
tag,
img_tensor,
box_tensor,
rescale=rescale,
dataformats=dataformats,
labels=labels,
),
global_step,
walltime,
)
def add_figure(self, tag, figure, global_step=None, close=True, walltime=None):
"""Render matplotlib figure into an image and add it to summary.
Note that this requires the ``matplotlib`` package.
Args:
tag (str): Data identifier
figure (matplotlib.pyplot.figure) or list of figures: Figure or a list of figures
global_step (int): Global step value to record
close (bool): Flag to automatically close the figure
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
"""
torch._C._log_api_usage_once("tensorboard.logging.add_figure")
if isinstance(figure, list):
self.add_image(
tag,
figure_to_image(figure, close),
global_step,
walltime,
dataformats="NCHW",
)
else:
self.add_image(
tag,
figure_to_image(figure, close),
global_step,
walltime,
dataformats="CHW",
)
def add_video(self, tag, vid_tensor, global_step=None, fps=4, walltime=None):
"""Add video data to summary.
Note that this requires the ``moviepy`` package.
Args:
tag (str): Data identifier
vid_tensor (torch.Tensor): Video data
global_step (int): Global step value to record
fps (float or int): Frames per second
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Shape:
vid_tensor: :math:`(N, T, C, H, W)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`.
"""
torch._C._log_api_usage_once("tensorboard.logging.add_video")
self._get_file_writer().add_summary(
video(tag, vid_tensor, fps), global_step, walltime
)
def add_audio(
self, tag, snd_tensor, global_step=None, sample_rate=44100, walltime=None
):
"""Add audio data to summary.
Args:
tag (str): Data identifier
snd_tensor (torch.Tensor): Sound data
global_step (int): Global step value to record
sample_rate (int): sample rate in Hz
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Shape:
snd_tensor: :math:`(1, L)`. The values should lie between [-1, 1].
"""
torch._C._log_api_usage_once("tensorboard.logging.add_audio")
if self._check_caffe2_blob(snd_tensor):
from caffe2.python import workspace
snd_tensor = workspace.FetchBlob(snd_tensor)
self._get_file_writer().add_summary(
audio(tag, snd_tensor, sample_rate=sample_rate), global_step, walltime
)
def add_text(self, tag, text_string, global_step=None, walltime=None):
"""Add text data to summary.
Args:
tag (str): Data identifier
text_string (str): String to save
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Examples::
writer.add_text('lstm', 'This is an lstm', 0)
writer.add_text('rnn', 'This is an rnn', 10)
"""
torch._C._log_api_usage_once("tensorboard.logging.add_text")
self._get_file_writer().add_summary(
text(tag, text_string), global_step, walltime
)
def add_onnx_graph(self, prototxt):
torch._C._log_api_usage_once("tensorboard.logging.add_onnx_graph")
self._get_file_writer().add_onnx_graph(load_onnx_graph(prototxt))
def add_graph(
self, model, input_to_model=None, verbose=False, use_strict_trace=True
):
"""Add graph data to summary.
Args:
model (torch.nn.Module): Model to draw.
input_to_model (torch.Tensor or list of torch.Tensor): A variable or a tuple of
variables to be fed.
verbose (bool): Whether to print graph structure in console.
use_strict_trace (bool): Whether to pass keyword argument `strict` to
`torch.jit.trace`. Pass False when you want the tracer to
record your mutable container types (list, dict)
"""
torch._C._log_api_usage_once("tensorboard.logging.add_graph")
if hasattr(model, "forward"):
# A valid PyTorch model should have a 'forward' method
self._get_file_writer().add_graph(
graph(model, input_to_model, verbose, use_strict_trace)
)
else:
# Caffe2 models do not have the 'forward' method
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from ._caffe2_graph import (
model_to_graph_def,
nets_to_graph_def,
protos_to_graph_def,
)
if isinstance(model, list):
if isinstance(model[0], core.Net):
current_graph = nets_to_graph_def(model)
elif isinstance(model[0], caffe2_pb2.NetDef):
current_graph = protos_to_graph_def(model)
else:
# Handles cnn.CNNModelHelper, model_helper.ModelHelper
current_graph = model_to_graph_def(model)
event = event_pb2.Event(graph_def=current_graph.SerializeToString())
self._get_file_writer().add_event(event)
@staticmethod
def _encode(rawstr):
# I'd use urllib, but I'm unsure about the differences between Python 2 and Python 3, so encode manually.
retval = rawstr
retval = retval.replace("%", "%%%02x" % (ord("%")))
retval = retval.replace("/", "%%%02x" % (ord("/")))
retval = retval.replace("\\", "%%%02x" % (ord("\\")))
return retval
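# For illustration (not in the original source): this percent-encoding keeps the
# embedding sub-directory name filesystem-safe, e.g. _encode("runs/exp1") returns
# "runs%2fexp1", a backslash becomes "%5c", and '%' itself becomes "%25".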
def add_embedding(
self,
mat,
metadata=None,
label_img=None,
global_step=None,
tag="default",
metadata_header=None,
):
"""Add embedding projector data to summary.
Args:
mat (torch.Tensor or numpy.ndarray): A matrix where each row is the feature vector of a data point
metadata (list): A list of labels; each element will be converted to string
label_img (torch.Tensor): Images corresponding to each data point
global_step (int): Global step value to record
tag (str): Name for the embedding
Shape:
mat: :math:`(N, D)`, where N is number of data and D is feature dimension
label_img: :math:`(N, C, H, W)`
Examples::
import keyword
import torch
meta = []
while len(meta)<100:
meta = meta+keyword.kwlist # get some strings
meta = meta[:100]
for i, v in enumerate(meta):
meta[i] = v+str(i)
label_img = torch.rand(100, 3, 10, 32)
for i in range(100):
label_img[i]*=i/100.0
writer.add_embedding(torch.randn(100, 5), metadata=meta, label_img=label_img)
writer.add_embedding(torch.randn(100, 5), label_img=label_img)
writer.add_embedding(torch.randn(100, 5), metadata=meta)
"""
torch._C._log_api_usage_once("tensorboard.logging.add_embedding")
mat = make_np(mat)
if global_step is None:
global_step = 0
# clear pbtxt?
# Maybe we should encode the tag so slashes don't trip us up?
# I don't think this will mess us up, but better safe than sorry.
subdir = "%s/%s" % (str(global_step).zfill(5), self._encode(tag))
save_path = os.path.join(self._get_file_writer().get_logdir(), subdir)
fs = tf.io.gfile.get_filesystem(save_path)
if fs.exists(save_path):
if fs.isdir(save_path):
print(
"warning: Embedding dir exists, did you set global_step for add_embedding()?"
)
else:
raise Exception(
"Path: `%s` exists, but is a file. Cannot proceed." % save_path
)
else:
fs.makedirs(save_path)
if metadata is not None:
assert mat.shape[0] == len(
metadata
), "#labels should equal #data points"
make_tsv(metadata, save_path, metadata_header=metadata_header)
if label_img is not None:
assert (
mat.shape[0] == label_img.shape[0]
), "#images should equal #data points"
make_sprite(label_img, save_path)
assert (
mat.ndim == 2
), "mat should be 2D, where mat.size(0) is the number of data points"
make_mat(mat, save_path)
# Filesystem doesn't necessarily have append semantics, so we store an
# internal buffer to append to and re-write whole file after each
# embedding is added
if not hasattr(self, "_projector_config"):
self._projector_config = ProjectorConfig()
embedding_info = get_embedding_info(
metadata, label_img, fs, subdir, global_step, tag
)
self._projector_config.embeddings.extend([embedding_info])
from google.protobuf import text_format
config_pbtxt = text_format.MessageToString(self._projector_config)
write_pbtxt(self._get_file_writer().get_logdir(), config_pbtxt)
def add_pr_curve(
self,
tag,
labels,
predictions,
global_step=None,
num_thresholds=127,
weights=None,
walltime=None,
):
"""Adds precision recall curve.
Plotting a precision-recall curve lets you understand your model's
performance under different threshold settings. With this function,
you provide the ground truth labeling (T/F) and prediction confidence
(usually the output of your model) for each target. The TensorBoard UI
will let you choose the threshold interactively.
Args:
tag (str): Data identifier
labels (torch.Tensor, numpy.ndarray, or string/blobname):
Ground truth data. Binary label for each element.
predictions (torch.Tensor, numpy.ndarray, or string/blobname):
The probability that an element is classified as true.
Values should be in [0, 1]
global_step (int): Global step value to record
num_thresholds (int): Number of thresholds used to draw the curve.
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Examples::
from torch.utils.tensorboard import SummaryWriter
import numpy as np
labels = np.random.randint(2, size=100) # binary label
predictions = np.random.rand(100)
writer = SummaryWriter()
writer.add_pr_curve('pr_curve', labels, predictions, 0)
writer.close()
"""
torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve")
labels, predictions = make_np(labels), make_np(predictions)
self._get_file_writer().add_summary(
pr_curve(tag, labels, predictions, num_thresholds, weights),
global_step,
walltime,
)
def add_pr_curve_raw(
self,
tag,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
global_step=None,
num_thresholds=127,
weights=None,
walltime=None,
):
"""Adds precision recall curve with raw data.
Args:
tag (str): Data identifier
true_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): true positive counts
false_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): false positive counts
true_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): true negative counts
false_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): false negative counts
precision (torch.Tensor, numpy.ndarray, or string/blobname): precision
recall (torch.Tensor, numpy.ndarray, or string/blobname): recall
global_step (int): Global step value to record
num_thresholds (int): Number of thresholds used to draw the curve.
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/README.md
"""
torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve_raw")
self._get_file_writer().add_summary(
pr_curve_raw(
tag,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds,
weights,
),
global_step,
walltime,
)
def add_custom_scalars_multilinechart(
self, tags, category="default", title="untitled"
):
"""Shorthand for creating multilinechart. Similar to ``add_custom_scalars()``, but the only necessary argument
is *tags*.
Args:
tags (list): list of tags that have been used in ``add_scalar()``
Examples::
writer.add_custom_scalars_multilinechart(['twse/0050', 'twse/2330'])
"""
torch._C._log_api_usage_once(
"tensorboard.logging.add_custom_scalars_multilinechart"
)
layout = {category: {title: ["Multiline", tags]}}
self._get_file_writer().add_summary(custom_scalars(layout))
def add_custom_scalars_marginchart(
self, tags, category="default", title="untitled"
):
"""Shorthand for creating marginchart. Similar to ``add_custom_scalars()``, but the only necessary argument
is *tags*, which should have exactly 3 elements.
Args:
tags (list): list of tags that have been used in ``add_scalar()``
Examples::
writer.add_custom_scalars_marginchart(['twse/0050', 'twse/2330', 'twse/2006'])
"""
torch._C._log_api_usage_once(
"tensorboard.logging.add_custom_scalars_marginchart"
)
assert len(tags) == 3
layout = {category: {title: ["Margin", tags]}}
self._get_file_writer().add_summary(custom_scalars(layout))
def add_custom_scalars(self, layout):
"""Create special chart by collecting charts tags in 'scalars'. Note that this function can only be called once
for each SummaryWriter() object. Because it only provides metadata to tensorboard, the function can be called
before or after the training loop.
Args:
layout (dict): {categoryName: *charts*}, where *charts* is also a dictionary
{chartName: *ListOfProperties*}. The first element in *ListOfProperties* is the chart's type
(one of **Multiline** or **Margin**) and the second element should be a list containing the tags
you have used in add_scalar function, which will be collected into the new chart.
Examples::
layout = {'Taiwan':{'twse':['Multiline',['twse/0050', 'twse/2330']]},
'USA':{ 'dow':['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']],
'nasdaq':['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}}
writer.add_custom_scalars(layout)
"""
torch._C._log_api_usage_once("tensorboard.logging.add_custom_scalars")
self._get_file_writer().add_summary(custom_scalars(layout))
def add_mesh(
self,
tag,
vertices,
colors=None,
faces=None,
config_dict=None,
global_step=None,
walltime=None,
):
"""Add meshes or 3D point clouds to TensorBoard. The visualization is based on Three.js,
so it allows users to interact with the rendered object. Besides basic definitions
such as vertices and faces, users can further provide camera parameters, lighting conditions, etc.
Please check https://threejs.org/docs/index.html#manual/en/introduction/Creating-a-scene for
advanced usage.
Args:
tag (str): Data identifier
vertices (torch.Tensor): List of the 3D coordinates of vertices.
colors (torch.Tensor): Colors for each vertex
faces (torch.Tensor): Indices of vertices within each triangle. (Optional)
config_dict: Dictionary with ThreeJS classes names and configuration.
global_step (int): Global step value to record
walltime (float): Optional override default walltime (time.time())
seconds after epoch of event
Shape:
vertices: :math:`(B, N, 3)`. (batch, number_of_vertices, channels)
colors: :math:`(B, N, 3)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`.
faces: :math:`(B, N, 3)`. The values should lie in [0, number_of_vertices] for type `uint8`.
Examples::
from torch.utils.tensorboard import SummaryWriter
vertices_tensor = torch.as_tensor([
[1, 1, 1],
[-1, -1, 1],
[1, -1, -1],
[-1, 1, -1],
], dtype=torch.float).unsqueeze(0)
colors_tensor = torch.as_tensor([
[255, 0, 0],
[0, 255, 0],
[0, 0, 255],
[255, 0, 255],
], dtype=torch.int).unsqueeze(0)
faces_tensor = torch.as_tensor([
[0, 2, 3],
[0, 3, 1],
[0, 1, 2],
[1, 3, 2],
], dtype=torch.int).unsqueeze(0)
writer = SummaryWriter()
writer.add_mesh('my_mesh', vertices=vertices_tensor, colors=colors_tensor, faces=faces_tensor)
writer.close()
"""
torch._C._log_api_usage_once("tensorboard.logging.add_mesh")
self._get_file_writer().add_summary(
mesh(tag, vertices, colors, faces, config_dict), global_step, walltime
)
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
if self.all_writers is None:
return
for writer in self.all_writers.values():
writer.flush()
def close(self):
if self.all_writers is None:
return # ignore double close
for writer in self.all_writers.values():
writer.flush()
writer.close()
self.file_writer = self.all_writers = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
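# A minimal usage sketch (not part of the original file): SummaryWriter works as a
# context manager, so flush() and close() are handled by __exit__. The log directory
# "runs/example" and the tag below are arbitrary, hypothetical choices.
if __name__ == "__main__":
    with SummaryWriter(log_dir="runs/example") as demo_writer:
        for step in range(3):
            demo_writer.add_scalar("demo/loss", 1.0 / (step + 1), step)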
|
pytorch-master
|
torch/utils/tensorboard/writer.py
|
import copy
import logging
import os
import re
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto
from builtins import bytes
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from typing import Set, Dict, Tuple, List
def _make_unique_name(seen: Set[str], name: str, min_version: int = 0):
"""
Make the name unique by appending a unique number to the name. Used for SSA.
Args:
seen (set): Set of names that have already been used (with respect to
some context).
name (str): The name to make unique
min_version (number): Starting index. Is incremented continually until
it can make the resulting name unique relative to 'seen'.
Returns:
x (str): A version of name that is not in seen.
"""
assert name is not None
i = min_version
x = "%s_%d" % (name, i) if i else name
while x in seen:
i += 1
x = "%s_%d" % (name, i)
seen.add(x)
return x
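# Illustrative sketch (not part of the original module): the first occurrence keeps
# its plain name, later collisions get numeric suffixes, and min_version forces the
# suffix to start at a given index.
def _example_make_unique_name():
    seen: Set[str] = set()
    assert _make_unique_name(seen, "conv1") == "conv1"
    assert _make_unique_name(seen, "conv1") == "conv1_1"
    assert _make_unique_name(seen, "conv1") == "conv1_2"
    assert _make_unique_name(set(), "fc", min_version=3) == "fc_3"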
def _rename_tensorflow_style(shapes, blob_name_tracker, ops):
"""
Convert some of the common names in Caffe2 to tensorflow.
NOTE: The common names in both Caffe2 and Tensorflow are currently
hardcoded, if either side changes at some point, then this code should
change as well.
Args:
shapes: Dictionary mapping blob names to their shapes/dimensions.
blob_name_tracker: Dictionary of all unique blob names (with respect to
some context).
ops: List of Caffe2 operators
Returns:
None. The _rename_all() call modifies blob_name_tracker and ops in-place.
"""
WEIGHT = re.compile(r"(_w)$")
WEIGHT_ = re.compile(r"(_w_)")
BN = re.compile(r"(_bn)$")
BN_ = re.compile(r"(_bn_)")
BIAS = re.compile(r"(_b)$")
BIAS_ = re.compile(r"(_b_)")
SCALE = re.compile(r"(_s)$")
SCALE_ = re.compile(r"(_s_)")
SUM = re.compile(r"(_sum)$")
SUM_ = re.compile(r"(_sum_)")
BRANCH = re.compile(r"(_branch)")
def f(name):
inter_name = WEIGHT_.sub("/weight_", WEIGHT.sub("/weight", name))
inter_name = BN_.sub("/batchnorm_", BN.sub("/batchnorm", inter_name))
inter_name = BIAS_.sub("/bias_", BIAS.sub("/bias", inter_name))
inter_name = SCALE_.sub("/scale_", SCALE.sub("/scale", inter_name))
inter_name = SUM_.sub("/sum_", SUM.sub("/sum", inter_name))
new_name = BRANCH.sub("/branch", inter_name)
return new_name
_rename_all(shapes, blob_name_tracker, ops, f)
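# For illustration (not in the original source): with the patterns above a Caffe2
# blob name such as "res2_0_branch2a_w" is rewritten to "res2_0/branch2a/weight",
# and "conv1_b" becomes "conv1/bias", grouping related blobs under TF-style scopes.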
def _convert_to_ssa(shapes, blob_name_tracker, ops):
"""
Convert an operator graph to SSA (i.e. out-of-place).
i.e. blobs will be renamed so that each blob is produced only once.
Args:
shapes: Dictionary mapping blob names to their shapes/dimensions.
blob_name_tracker: Dictionary of all unique blob names (with respect to
some context).
ops: List of Caffe2 operators
Returns:
None. Modifies blob_name_tracker and ops in-place.
"""
ir = core.IR(ops)
seen: Set[str] = set()
versioned: Dict[Tuple[str, int], str] = {}
new_shapes = {}
new_blob_name_tracker = {}
def ssa_name(name: str, versions: Dict[str, int]) -> str:
assert name in versions
version = versions[name]
if (name, version) in versioned:
return versioned[(name, version)]
# Always setting name2 = `{name}_{version}` would work, but we also try
# to avoid a trailing `_0`, so we have to be careful not to introduce
# name collisions, such as (foo_1, 0) = foo_1 = (foo, 1).
# Note: operator names (if any) will be handled later.
new_name = _make_unique_name(seen, name, min_version=version)
versioned[(name, version)] = new_name
# Transfer shape.
if name in shapes:
new_shapes[new_name] = shapes[name]
if blob_name_tracker and name in blob_name_tracker:
new_blob_name_tracker[new_name] = blob_name_tracker[name]
return new_name
for (op, ssa) in zip(ops, ir.ssa):
assert op is ssa.op
inputs = list(op.input)
outputs = list(op.output)
del op.input[:]
del op.output[:]
op.input.extend(ssa_name(name, ssa.in_versions) for name in inputs)
op.output.extend(ssa_name(name, ssa.out_versions) for name in outputs)
shapes.clear()
shapes.update(new_shapes)
if blob_name_tracker:
blob_name_tracker.clear()
blob_name_tracker.update(new_blob_name_tracker)
def _get_blob_names(ops):
"""
Get all the operator input and output blobs and perform dedup on their names.
Args:
ops: List of Caffe2 operators to extract inputs and outputs from
Returns:
set containing distinct inputs and outputs from 'ops'
"""
names = set()
for op in ops:
names.update(op.input)
names.update(op.output)
return {name: name for name in names}
def _remap_keys(old_dict, rename_fn):
"""
Rename keys of 'old_dict' according to 'rename_fn'.
Args:
old_dict: Dictionary (i.e. containing blob_name -> blob_name
relationships.)
rename_fn: Function string -> string for renaming.
Returns:
None. Modifies old_dict in-place.
"""
new_dict = {rename_fn(key): value for key, value in old_dict.items()}
old_dict.clear()
old_dict.update(new_dict)
def _rename_all(shapes, blob_name_tracker, ops, rename_fn):
"""
Rename all the names in the operators.
Args:
shapes: Dictionary mapping blob names to their shapes/dimensions.
blob_name_tracker: Dictionary of all unique blob names (with respect to
some context).
ops: List of Caffe2 operators
rename_fn: Function string -> string that specifies how to rename
Returns:
None. Modifies shapes, blob_name_tracker and ops in-place using the
specified 'rename_fn'.
"""
seen: Set[str] = set()
renamed: Dict[str, str] = {}
def g(name):
"""Collision-free version of f."""
if name is None:
return None
if name in renamed:
return renamed[name]
new_name = _make_unique_name(seen, rename_fn(name))
renamed[name] = new_name
return new_name
for op in ops:
inputs = list(op.input)
outputs = list(op.output)
del op.input[:]
del op.output[:]
op.input.extend(g(name) for name in inputs)
op.output.extend(g(name) for name in outputs)
_remap_keys(shapes, g)
if blob_name_tracker:
_remap_keys(blob_name_tracker, g)
# Rename all operator names (if any) independently so that the
# unique-fication happens only once in _fill_missing_operator_names().
seen.clear()
renamed.clear()
for op in ops:
op.name = g(op.name)
def _add_gradient_scope(shapes, blob_name_tracker, ops):
"""
For all operators or blobs with name containing "_grad", add a
"GRADIENTS/" scope.
Note: breaks graph execution since the blob -> gradient mapping is
hardcoded.
Args:
shapes: Dictionary mapping blob names to their shapes/dimensions.
blob_name_tracker: Dictionary of all unique blob names (with respect to
some context).
ops: List of Caffe2 operators
Returns:
None. Modifies shapes, blob_name_tracker and ops in-place by renaming.
"""
def f(name):
if "_grad" in name:
return "GRADIENTS/{}".format(name)
else:
return name
_rename_all(shapes, blob_name_tracker, ops, f)
def _replace_colons(shapes, blob_name_tracker, ops, repl):
"""
`:i` has a special meaning in Tensorflow. This function replaces all colons
with `repl` (usually '$') to avoid any possible conflicts.
Args:
shapes: Dictionary mapping blob names to their shapes/dimensions.
blob_name_tracker: Dictionary of all unique blob names (with respect to
some context).
ops: List of Caffe2 operators
repl: String representing the text to replace ':' with. Usually this is
'$'.
Returns:
None. Modifies shapes, blob_name_tracker and ops in-place.
"""
def f(name):
return name.replace(":", repl)
_rename_all(shapes, blob_name_tracker, ops, f)
def _fill_missing_operator_names(ops):
"""
Give missing operators a name.
We expect C2 operators to be generally unnamed. This gives them a scope
(inferred from their outputs) and a name after their type. Duplicates will
be postfixed by an index.
Args:
ops: List of Caffe2 operators to assign names to.
Returns:
None: Modifies 'ops' in-place.
"""
seen = set()
for op in ops:
# Make sure operator names don't collide with blobs.
seen.update(op.input)
seen.update(op.output)
for op in ops:
if op.name:
name = op.name
elif op.output or op.input:
name_list = [os.path.dirname(name) for name in op.output or op.input]
scope = os.path.commonprefix(name_list)
name = os.path.join(scope, op.type)
else:
name = op.type
assert name
op.name = _make_unique_name(seen, name)
def _tf_device(device_option):
"""
Handle the devices.
Args:
device_option (caffe2_pb2.DeviceOption): DeviceOption protobuf,
associated to an operator, that contains information such as
device_type (optional), device_id (optional), node_name (optional,
tells which node the operator should execute on). See caffe2.proto
in caffe2/proto for the full list.
Returns:
Formatted string representing device information contained in
device_option.
"""
if not device_option.HasField("device_type"):
return ""
if (
device_option.device_type == caffe2_pb2.CPU
or device_option.device_type == caffe2_pb2.MKLDNN
):
return "/cpu:*"
if device_option.device_type == caffe2_pb2.CUDA:
return "/gpu:{}".format(device_option.device_id)
raise Exception("Unhandled device", device_option)
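# Illustrative sketch (not part of the original module) of the mapping above:
def _example_tf_device():
    opt = caffe2_pb2.DeviceOption()
    opt.device_type = caffe2_pb2.CUDA
    opt.device_id = 1
    assert _tf_device(opt) == "/gpu:1"
    # A DeviceOption with no device_type set maps to the empty string.
    assert _tf_device(caffe2_pb2.DeviceOption()) == ""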
def _add_tf_shape(attr_dict, ints):
"""
Converts a list of ints to a TensorShapeProto representing the dimensions of
a blob/object.
Args:
attr_dict: Dictionary to update (usually attributes of a Node)
ints: List of integers representing dimensions of some object.
Returns:
None. Modifies attr_dict in-place.
"""
shape_proto = TensorShapeProto()
for i in ints:
dim = TensorShapeProto.Dim()
dim.size = i
shape_proto.dim.extend([dim])
attr_dict["_output_shapes"].list.shape.extend([shape_proto])
def _set_tf_attr(attr_dict, arg):
"""
Add attributes to a node. Key is the arg.name, and values can be shape,
floats, strings, ints or an empty list.
Args:
attr_dict: Dictionary to update (usually attributes of a Node)
arg: Object with name and data fields.
Returns:
None. Modifies attr_dict in-place.
"""
k = arg.name
if k == "shape" and arg.ints:
_add_tf_shape(attr_dict, arg.ints)
return
# Float
if arg.HasField("f"):
attr_dict[k].f = arg.f
return
# Integer
if arg.HasField("i"):
attr_dict[k].i = arg.i
return
# String
if arg.HasField("s"):
attr_dict[k].s = (
arg.s if isinstance(arg.s, bytes) else str(arg.s).encode("utf-8")
)
return
if arg.floats:
attr_dict[k].list.f.extend(arg.floats)
return
if arg.ints:
attr_dict[k].list.i.extend(arg.ints)
return
if arg.strings:
attr_dict[k].list.s.extend(
s if isinstance(s, bytes) else str(s).encode("utf-8") for s in arg.strings
)
return
# The value is an empty list.
attr_dict[k].list.s.extend([])
def _operator_to_node(shapes, op):
"""
Converts an operator to a node in a TF graph.
Args:
shapes: Dictionary mapping blob names to their shapes/dimensions.
op: The Caffe2 operator to convert to a TF graph node.
Returns:
n: The TF graph node created from op.
"""
assert op.name, op
n = NodeDef()
n.name = op.name
n.input.extend(op.input)
n.op = op.type
n.device = _tf_device(op.device_option)
if shapes:
# Add shapes in order.
for output in op.output:
if output not in shapes:
break
_add_tf_shape(n.attr, shapes[output])
for arg in op.arg:
_set_tf_attr(n.attr, arg)
return n
def _operator_to_node_simp(op, inter_blobs, seen):
"""
Convert the operators to nodes.
Args:
op: Caffe2 operator to convert to node
inter_blobs: Set of intermediate blobs
seen: Names that have already been used and are not unique
Returns:
nodes: Nodes representing 'op' and the outputs of 'op'
"""
assert op
nodes = []
outputs = [o for o in op.output if o not in inter_blobs]
seen.update(outputs)
len_outputs = len(outputs)
if len_outputs == 1:
n = NodeDef()
n.name = outputs[0]
# Here we are sure the name is unique.
n.input.extend(op.input)
n.op = op.type
n.device = _tf_device(op.device_option)
for arg in op.arg:
_set_tf_attr(n.attr, arg)
nodes.append(n)
elif len_outputs > 1:
# Create a name that is likely unique
if op.name:
name = op.name
else:
name_list = list(outputs)
scope = os.path.commonprefix(name_list)
name = os.path.join(scope, op.type)
assert name
op.name = _make_unique_name(seen, name)
device = _tf_device(op.device_option)
# Create additional output nodes
for output in outputs:
n = NodeDef()
n.name = output
n.input.extend([op.name])
n.op = "Blob"
n.device = device
nodes.append(n)
# Node for the current op
n = NodeDef()
n.name = op.name
n.input.extend(op.input)
n.op = op.type
n.device = device
for arg in op.arg:
_set_tf_attr(n.attr, arg)
nodes.append(n)
return nodes
def _blob_to_node(producing_ops, shapes, name):
"""
Converts a blob (operator input or output) to a node in a TF graph.
Args:
producing_ops: Dictionary of blob name to list of
(producing_op, blob_index within producing_op.output) mapping.
shapes: Dictionary mapping blob names to their shapes/dimensions.
name: String representing the name of this blob.
Returns:
n: The TF graph node created from this blob.
"""
assert name
n = NodeDef()
n.name = name
# Get all ops that have the blob corresponding to 'name' as one of their
# outputs. See _operators_to_graph_def.
produced_by = producing_ops.get(name, [])
if len(produced_by) > 0:
n.op = "Blob"
else:
# This blob is not produced but is instead a TF Placeholder where a
# value is passed in.
n.op = "Placeholder"
n.input.extend("%s:%d" % (p_op.name, i) for p_op, i in produced_by)
if produced_by:
device = produced_by[0][0].device_option
if all(producer[0].device_option == device for producer in produced_by):
n.device = _tf_device(device)
if shapes and name in shapes:
_add_tf_shape(n.attr, shapes[name])
return n
def _clear_debug_info(ops, perform_clear):
"""
Removes debug information from operators, since it can be copious.
Args:
ops: List of Caffe2 operators
perform_clear: Boolean passed from _operators_to_graph_def specifying
whether to remove the debug information. This boolean is passed into
this function to reduce the complexity of _operators_to_graph_def.
Returns:
None. Modifies the list of Caffe2 operators in-place and removes the
'debug_info' field.
"""
if not perform_clear:
return
for op in ops:
if op.HasField("debug_info"):
op.ClearField("debug_info")
def _check_if_forward(blob):
"""
Blobs with names containing '__m' or 'grad' are part of the backward pass.
This function references facebookresearch/Detectron/detectron/utils/net.py.
Args:
blob: The blob to inspect
Returns:
Boolean representing whether this blob is part of the forward pass
"""
# Note: as written, a blob is only treated as backward (and filtered out) when its
# name contains both '__m' and 'grad'.
return blob.find("__m") < 0 or blob.find("grad") < 0
def _check_if_cpu(blob):
"""
Check if the blob's name starts with '_gpu'.
Args:
blob: The blob to inspect
Returns:
Boolean representing whether this blob is not associated with a gpu
"""
return not blob.startswith("_gpu")
def _compute_in_out(ops):
"""
Find the input, intermediate and output nodes of a set of operators.
Args:
ops: List of Caffe2 operators to look through
Returns:
input_blobs: The input nodes of the set of operators
inter_blobs: The intermediate nodes of the set of operators
output_blobs: The output nodes of the set of operators
"""
in_blobs = set()
out_blobs = set()
for op in ops:
for input_blob in op.input:
in_blobs.add(input_blob)
for output_blob in op.output:
out_blobs.add(output_blob)
input_blobs = list(in_blobs.difference(out_blobs))
output_blobs = list(out_blobs.difference(in_blobs))
inter_blobs = {b for b in output_blobs if b.startswith("_")}
output_blobs = [b for b in output_blobs if b not in inter_blobs]
return input_blobs, inter_blobs, output_blobs
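# A small sketch (not part of the original module): an operator whose extra output
# starts with "_" and is never consumed is classified as an intermediate blob.
def _example_compute_in_out():
    drop = caffe2_pb2.OperatorDef()
    drop.type = "Dropout"
    drop.input.extend(["fc_out"])
    drop.output.extend(["pred", "_dropout_mask"])
    inputs, inter, outputs = _compute_in_out([drop])
    assert inputs == ["fc_out"]
    assert inter == {"_dropout_mask"}
    assert outputs == ["pred"]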
def _filter_ops(ops, filter_fn, perform_filter):
"""
Filter unwanted operators based on criteria in 'filter_fn'.
Args:
ops: List of Caffe2 operators to filter
filter_fn: Criteria function for whether inputs/outputs in an operator
should be filtered.
perform_filter: Boolean passed from _operators_to_graph_def specifying
whether to filter operators
Returns:
new_ops: Subset of ops containing a subset of their inputs and outputs.
"""
if not perform_filter:
return ops
new_ops = []
for op in ops:
inputs = list(op.input)
outputs = list(op.output)
del op.input[:]
del op.output[:]
new_inputs = [i for i in inputs if filter_fn(i)]
new_outputs = [o for o in outputs if filter_fn(o)]
# Only add the op if output is not empty
if new_outputs:
op.input.extend(new_inputs)
op.output.extend(new_outputs)
new_ops.append(op)
return new_ops
def _operators_to_graph_def(
shapes,
ops,
colon_replacement="$",
with_ssa=True,
with_gradient_scope=True,
blob_name_tracker=None,
show_simplified=False,
custom_rename=None,
):
"""
Main function to convert set of operators to a graph.
Args:
shapes: Dictionary mapping blob names to their shapes/dimensions.
ops: List of Caffe2 operators, representing some computation graph
### **kwargs (model_to_graph_def, nets_to_graph_def, protos_to_graph_def) ###
colon_replacement: Symbol to replace ':' with. ':i' in TF has a special
meaning, so we need to replace it with a non-conflicting symbol.
with_ssa: Boolean
with_gradient_scope: Boolean
blob_name_tracker: Dictionary tracking names of blobs (inputs/outputs
from operators)
show_simplified: Whether to show a simplified version of the model graph
Sets all of the following values:
clear_debug_info: Boolean representing whether to silence debug
info (which can be very verbose)
show_forward_only: Boolean representing whether to only show
blobs involved in the forward pass
show_cpu_only: Boolean representing whether to only show blobs
that are not associated with a gpu
use_tensorflow_naming: Boolean representing whether to convert
some common Caffe2 naming conventions to their Tensorflow
counterparts
custom_rename: Function string -> string that defines a custom
renaming function to use.
Returns:
current_graph: GraphDef representing the computation graph formed by the
set of operators.
"""
if blob_name_tracker is not None:
blob_name_tracker.clear()
else:
blob_name_tracker = {}
blob_name_tracker.update(_get_blob_names(ops))
_clear_debug_info(ops, show_simplified) # clear_debug_info
ops = _filter_ops(ops, _check_if_forward, show_simplified) # show_forward_only
ops = _filter_ops(ops, _check_if_cpu, show_simplified) # show_cpu_only
if custom_rename:
_rename_all(shapes, blob_name_tracker, ops, custom_rename)
if colon_replacement:
_replace_colons(shapes, blob_name_tracker, ops, colon_replacement)
if with_ssa:
_convert_to_ssa(shapes, blob_name_tracker, ops)
if with_gradient_scope:
_add_gradient_scope(shapes, blob_name_tracker, ops)
_fill_missing_operator_names(ops)
if show_simplified: # use_tensorflow_naming
_rename_tensorflow_style(shapes, blob_name_tracker, ops)
producing_ops: Dict[str, List] = {}
blobs = set()
input_blobs, inter_blobs, _ = _compute_in_out(ops)
current_graph = GraphDef()
seen = set(input_blobs)
for op in ops:
nodes_from_op = (
_operator_to_node_simp(op, inter_blobs, seen)
if show_simplified
else [_operator_to_node(shapes, op)]
) # .extend() expects an iterable
current_graph.node.extend(nodes_from_op)
for input_blob in op.input:
blobs.add(input_blob)
for i, output_blob in enumerate(op.output):
blobs.add(output_blob)
producing_ops.setdefault(output_blob, []).append((op, i))
if show_simplified:
# Show a cleaner, easier-to-interpret version of the model graph
blobs = input_blobs
for blob in sorted(blobs):
current_graph.node.extend([_blob_to_node(producing_ops, {}, blob)])
return current_graph
def _propagate_device_option(net_def):
"""
Propagate the device options from net to operators.
Args:
net_def: A caffe2_pb2.NetDef representing a computation graph. The graph
consists of Caffe2 operators.
Returns:
None. Iterates through all ops contained within the net. For each op,
modifies the op device_option in-place to be the net device_option
if the op has no pre-existing device_option, and leaves the op as-is
if it already has a device_option.
"""
if not net_def.HasField("device_option"):
return
for op in net_def.op:
if not op.HasField("device_option"):
op.device_option.CopyFrom(net_def.device_option)
def _try_get_shapes(nets):
"""
Get missing shapes for all blobs contained in the nets.
Args:
nets: List of core.Net to extract blob shape information from.
Returns:
Dictionary containing blob name to shape/dimensions mapping. The net
is a computation graph that is composed of operators, and the
operators have input and output blobs, each with their own dims.
"""
try:
# Note: this will inspect the workspace for better or worse.
# We don't care about the types, only the shapes
shapes, _ = workspace.InferShapesAndTypes(nets)
return shapes
except Exception as e:
logging.warning("Failed to compute shapes: %s", e)
return {}
def model_to_graph_def(model, **kwargs):
"""
Convert a Caffe2 model to a Tensorflow graph. This function extracts
'param_init_net' and 'net' from the model and passes them to nets_to_graph_def()
for further processing.
Args:
model (cnn.CNNModelHelper, model_helper.ModelHelper): The model to
extract the nets (instances of core.Net) from.
Returns:
Call to nets_to_graph_def() with extracted 'param_init_net', 'net' and
**kwargs. See _operators_to_graph_def for detailed **kwargs.
"""
nets = [model.param_init_net, model.net]
return nets_to_graph_def(nets, **kwargs)
def nets_to_graph_def(nets, shapes=None, **kwargs):
"""
Convert a set of Caffe2 nets to a Tensorflow graph.
Args:
nets: List of core.Nets. core.Net is a wrapper around a NetDef protobuf.
The corresponding protobuf can be extracted using .Proto().
shapes: Dictionary mapping blob names to their shapes/dimensions.
Returns:
Call to protos_to_graph_def() with the extracted NetDef protobufs and
**kwargs. See _operators_to_graph_def for detailed **kwargs.
"""
# if shapes is None:
# shapes = _try_get_shapes(nets)
# _try_get_shapes(nets) depends on workspace.InferShapesAndTypes(nets),
# which is currently broken (segfault). We omit the shapes for now.
shapes = {}
nets = [copy.deepcopy(net.Proto()) for net in nets]
shapes = copy.deepcopy(shapes)
return protos_to_graph_def(nets, shapes, **kwargs)
def protos_to_graph_def(net_defs, shapes=None, **kwargs):
"""
Convert a set of Caffe2 net definitions to a Tensorflow graph.
Args:
net_defs: List of caffe2_pb2.NetDef protobufs representing computation
graphs.
shapes: Dictionary mapping blob names to their shapes/dimensions.
Returns:
Call to _operators_to_graph_def() with the extracted operators from the
NetDefs and **kwargs. See _operators_to_graph_def for detailed
**kwargs.
"""
for net in net_defs:
_propagate_device_option(net)
shapes = copy.deepcopy(shapes or {})
ops = [op for net_def in net_defs for op in net_def.op]
return _operators_to_graph_def(shapes, ops, **kwargs)
|
pytorch-master
|
torch/utils/tensorboard/_caffe2_graph.py
|
import numpy as np
# Functions for converting
def figure_to_image(figures, close=True):
"""Render matplotlib figure to numpy format.
Note that this requires the ``matplotlib`` package.
Args:
figure (matplotlib.pyplot.figure) or list of figures: figure or a list of figures
close (bool): Flag to automatically close the figure
Returns:
numpy.array: image in [CHW] order
"""
import matplotlib.pyplot as plt
import matplotlib.backends.backend_agg as plt_backend_agg
def render_to_rgb(figure):
canvas = plt_backend_agg.FigureCanvasAgg(figure)
canvas.draw()
data = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8)
w, h = figure.canvas.get_width_height()
image_hwc = data.reshape([h, w, 4])[:, :, 0:3]
image_chw = np.moveaxis(image_hwc, source=2, destination=0)
if close:
plt.close(figure)
return image_chw
if isinstance(figures, list):
images = [render_to_rgb(figure) for figure in figures]
return np.stack(images)
else:
image = render_to_rgb(figures)
return image
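# Minimal sketch (not part of the original module): render a throwaway matplotlib
# figure and check the channels-first RGB layout. Assumes matplotlib is installed.
def _example_figure_to_image():
    import matplotlib
    matplotlib.use("Agg")  # headless backend
    import matplotlib.pyplot as plt
    fig = plt.figure()
    plt.plot([0, 1], [0, 1])
    img = figure_to_image(fig)
    assert img.ndim == 3 and img.shape[0] == 3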
def _prepare_video(V):
"""
Converts a 5D tensor [batchsize, time(frame), channel(color), height, width]
into 4D tensor with dimension [time(frame), new_width, new_height, channel].
A batch of images is spread over a grid, which forms a frame.
e.g. a video with batch size 16 will have a 4x4 grid.
"""
b, t, c, h, w = V.shape
if V.dtype == np.uint8:
V = np.float32(V) / 255.0
def is_power2(num):
return num != 0 and ((num & (num - 1)) == 0)
# pad to nearest power of 2, all at once
if not is_power2(V.shape[0]):
len_addition = int(2 ** V.shape[0].bit_length() - V.shape[0])
V = np.concatenate((V, np.zeros(shape=(len_addition, t, c, h, w))), axis=0)
n_rows = 2 ** ((b.bit_length() - 1) // 2)
n_cols = V.shape[0] // n_rows
V = np.reshape(V, newshape=(n_rows, n_cols, t, c, h, w))
V = np.transpose(V, axes=(2, 0, 4, 1, 5, 3))
V = np.reshape(V, newshape=(t, n_rows * h, n_cols * w, c))
return V
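# Shape sketch (not part of the original module): 16 videos of 10 frames, each frame
# 3x32x32, are tiled into a 4x4 grid per time step, giving a (T, H, W, C) result.
def _example_prepare_video():
    fake_video = np.zeros((16, 10, 3, 32, 32), dtype=np.uint8)
    frames = _prepare_video(fake_video)
    assert frames.shape == (10, 4 * 32, 4 * 32, 3)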
def make_grid(I, ncols=8):
# I: N1HW or N3HW
assert isinstance(I, np.ndarray), "plugin error, should pass numpy array here"
if I.shape[1] == 1:
I = np.concatenate([I, I, I], 1)
assert I.ndim == 4 and I.shape[1] == 3
nimg = I.shape[0]
H = I.shape[2]
W = I.shape[3]
ncols = min(nimg, ncols)
nrows = int(np.ceil(float(nimg) / ncols))
canvas = np.zeros((3, H * nrows, W * ncols), dtype=I.dtype)
i = 0
for y in range(nrows):
for x in range(ncols):
if i >= nimg:
break
canvas[:, y * H : (y + 1) * H, x * W : (x + 1) * W] = I[i]
i = i + 1
return canvas
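# Layout sketch (not part of the original module): 10 single-channel 4x4 images are
# replicated to 3 channels and packed into a 2-row, 8-column canvas.
def _example_make_grid():
    batch = np.random.rand(10, 1, 4, 4).astype(np.float32)
    grid = make_grid(batch, ncols=8)
    assert grid.shape == (3, 2 * 4, 8 * 4)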
# if modality == 'IMG':
# if x.dtype == np.uint8:
# x = x.astype(np.float32) / 255.0
def convert_to_HWC(tensor, input_format): # tensor: numpy array
assert len(set(input_format)) == len(
input_format
), "You cannot use the same dimension shorthand twice. \
input_format: {}".format(
input_format
)
assert len(tensor.shape) == len(
input_format
), "size of input tensor and input format are different. \
tensor shape: {}, input_format: {}".format(
tensor.shape, input_format
)
input_format = input_format.upper()
if len(input_format) == 4:
index = [input_format.find(c) for c in "NCHW"]
tensor_NCHW = tensor.transpose(index)
tensor_CHW = make_grid(tensor_NCHW)
return tensor_CHW.transpose(1, 2, 0)
if len(input_format) == 3:
index = [input_format.find(c) for c in "HWC"]
tensor_HWC = tensor.transpose(index)
if tensor_HWC.shape[2] == 1:
tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC], 2)
return tensor_HWC
if len(input_format) == 2:
index = [input_format.find(c) for c in "HW"]
tensor = tensor.transpose(index)
tensor = np.stack([tensor, tensor, tensor], 2)
return tensor
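# Conversion sketch (not part of the original module): a single CHW image becomes
# HWC, and a 2D HW map is stacked into three identical channels.
def _example_convert_to_HWC():
    chw = np.random.rand(3, 8, 8)
    assert convert_to_HWC(chw, "chw").shape == (8, 8, 3)
    hw = np.random.rand(8, 8)
    assert convert_to_HWC(hw, "hw").shape == (8, 8, 3)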
|
pytorch-master
|
torch/utils/tensorboard/_utils.py
|
#!/usr/bin/env python3
"""
model_dump: a one-stop shop for TorchScript model inspection.
The goal of this tool is to provide a simple way to extract lots of
useful information from a TorchScript model and make it easy for humans
to consume. It (mostly) replaces zipinfo, common uses of show_pickle,
and various ad-hoc analysis notebooks.
The tool extracts information from the model and serializes it as JSON.
That JSON can then be rendered by an HTML+JS page, either by
loading the JSON over HTTP or producing a fully self-contained page
with all of the code and data burned-in.
"""
# Maintainer notes follow.
"""
The implementation strategy has tension between 3 goals:
- Small file size.
- Fully self-contained.
- Easy, modern JS environment.
Using Preact and HTM achieves 1 and 2 with a decent result for 3.
However, the models I tested with result in ~1MB JSON output,
so even using something heavier like full React might be tolerable
if the build process can be worked out.
One principle I have followed that I think is very beneficial
is to keep the JSON data as close as possible to the model
and do most of the rendering logic on the client.
This makes for easier development (just refresh, usually),
allows for more laziness and dynamism, and lets us add more
views of the same data without bloating the HTML file.
Currently, this code doesn't actually load the model or even
depend on any part of PyTorch. I don't know if that's an important
feature to maintain, but it's probably worth preserving the ability
to run at least basic analysis on models that cannot be loaded.
I think the easiest way to develop this code is to cd into model_dump and
run "python -m http.server", then load http://localhost:8000/skeleton.html
in the browser. In another terminal, run
"python -m torch.utils.model_dump --style=json FILE > \
torch/utils/model_dump/model_info.json"
every time you update the Python code or model.
When you update JS, just refresh.
Possible improvements:
- Fix various TODO comments in this file and the JS.
- Make the HTML much less janky, especially the auxiliary data panel.
- Make the auxiliary data panel start small, expand when
data is available, and have a button to clear/contract.
- Clean up the JS. There's a lot of copypasta because
I don't really know how to use Preact.
- Make the HTML render and work nicely inside a Jupyter notebook.
- Add the ability for JS to choose the URL to load the JSON based
on the page URL (query or hash). That way we could publish the
inlined skeleton once and have it load various JSON blobs.
- Add a button to expand all expandable sections so ctrl-F works well.
- Add hyperlinking from data to code, and code to code.
- Add hyperlinking from debug info to Diffusion.
- Make small tensor contents available.
- Do something nice for quantized models
(they probably don't work at all right now).
"""
import sys
import os
import io
import pathlib
import re
import argparse
import zipfile
import json
import pickle
import pprint
import urllib.parse
from typing import (
Dict,
)
import torch.utils.show_pickle
DEFAULT_EXTRA_FILE_SIZE_LIMIT = 16 * 1024
__all__ = ['get_storage_info', 'hierarchical_pickle', 'get_model_info', 'get_inline_skeleton',
'burn_in_info', 'get_info_and_burn_skeleton']
def get_storage_info(storage):
assert isinstance(storage, torch.utils.show_pickle.FakeObject)
assert storage.module == "pers"
assert storage.name == "obj"
assert storage.state is None
assert isinstance(storage.args, tuple)
assert len(storage.args) == 1
sa = storage.args[0]
assert isinstance(sa, tuple)
assert len(sa) == 5
assert sa[0] == "storage"
assert isinstance(sa[1], torch.utils.show_pickle.FakeClass)
assert sa[1].module == "torch"
assert sa[1].name.endswith("Storage")
storage_info = [sa[1].name.replace("Storage", "")] + list(sa[2:])
return storage_info
def hierarchical_pickle(data):
if isinstance(data, (bool, int, float, str, type(None))):
return data
if isinstance(data, list):
return [hierarchical_pickle(d) for d in data]
if isinstance(data, tuple):
return {
"__tuple_values__": hierarchical_pickle(list(data)),
}
if isinstance(data, dict):
return {
"__is_dict__": True,
"keys": hierarchical_pickle(list(data.keys())),
"values": hierarchical_pickle(list(data.values())),
}
if isinstance(data, torch.utils.show_pickle.FakeObject):
typename = f"{data.module}.{data.name}"
if (
typename.startswith("__torch__.") or
typename.startswith("torch.jit.LoweredWrapper.") or
typename.startswith("torch.jit.LoweredModule.")
):
assert data.args == ()
return {
"__module_type__": typename,
"state": hierarchical_pickle(data.state),
}
if typename == "torch._utils._rebuild_tensor_v2":
assert data.state is None
storage, offset, size, stride, requires_grad, hooks = data.args
storage_info = get_storage_info(storage)
return {"__tensor_v2__": [storage_info, offset, size, stride, requires_grad]}
if typename == "torch._utils._rebuild_qtensor":
assert data.state is None
storage, offset, size, stride, quantizer, requires_grad, hooks = data.args
storage_info = get_storage_info(storage)
assert isinstance(quantizer, tuple)
assert isinstance(quantizer[0], torch.utils.show_pickle.FakeClass)
assert quantizer[0].module == "torch"
if quantizer[0].name == "per_tensor_affine":
assert len(quantizer) == 3
assert isinstance(quantizer[1], float)
assert isinstance(quantizer[2], int)
quantizer_extra = list(quantizer[1:3])
else:
quantizer_extra = []
quantizer_json = [quantizer[0].name] + quantizer_extra
return {"__qtensor__": [storage_info, offset, size, stride, quantizer_json, requires_grad]}
if typename == "torch.jit._pickle.restore_type_tag":
assert data.state is None
obj, typ = data.args
assert isinstance(typ, str)
return hierarchical_pickle(obj)
if re.fullmatch(r"torch\.jit\._pickle\.build_[a-z]+list", typename):
assert data.state is None
ls, = data.args
assert isinstance(ls, list)
return hierarchical_pickle(ls)
if typename == "torch.device":
assert data.state is None
name, = data.args
assert isinstance(name, str)
# Just forget that it was a device and return the name.
return name
if typename == "builtin.UnicodeDecodeError":
assert data.state is None
msg, = data.args
assert isinstance(msg, str)
# Hack: Pretend this is a module so we don't need custom serialization.
# Hack: Wrap the message in a tuple so it looks like a nice state object.
# TODO: Undo at least that second hack. We should support string states.
return {
"__module_type__": typename,
"state": hierarchical_pickle((msg,)),
}
raise Exception(f"Can't prepare fake object of type for JS: {typename}")
raise Exception(f"Can't prepare data of type for JS: {type(data)}")
def get_model_info(
path_or_file,
title=None,
extra_file_size_limit=DEFAULT_EXTRA_FILE_SIZE_LIMIT):
"""Get JSON-friendly information about a model.
The result is suitable for being saved as model_info.json,
or passed to burn_in_info.
"""
if isinstance(path_or_file, os.PathLike):
default_title = os.fspath(path_or_file)
file_size = path_or_file.stat().st_size # type: ignore[attr-defined]
elif isinstance(path_or_file, str):
default_title = path_or_file
file_size = pathlib.Path(path_or_file).stat().st_size
else:
default_title = "buffer"
path_or_file.seek(0, io.SEEK_END)
file_size = path_or_file.tell()
path_or_file.seek(0)
title = title or default_title
with zipfile.ZipFile(path_or_file) as zf:
path_prefix = None
zip_files = []
for zi in zf.infolist():
prefix = re.sub("/.*", "", zi.filename)
if path_prefix is None:
path_prefix = prefix
elif prefix != path_prefix:
raise Exception(f"Mismatched prefixes: {path_prefix} != {prefix}")
zip_files.append(dict(
filename=zi.filename,
compression=zi.compress_type,
compressed_size=zi.compress_size,
file_size=zi.file_size,
))
assert path_prefix is not None
version = zf.read(path_prefix + "/version").decode("utf-8").strip()
def get_pickle(name):
assert path_prefix is not None
with zf.open(path_prefix + f"/{name}.pkl") as handle:
raw = torch.utils.show_pickle.DumpUnpickler(handle, catch_invalid_utf8=True).load()
return hierarchical_pickle(raw)
model_data = get_pickle("data")
constants = get_pickle("constants")
# Intern strings that are likely to be re-used.
# Pickle automatically detects shared structure,
# so re-used strings are stored efficiently.
# However, JSON has no way of representing this,
# so we have to do it manually.
interned_strings : Dict[str, int] = {}
def ist(s):
if s not in interned_strings:
interned_strings[s] = len(interned_strings)
return interned_strings[s]
code_files = {}
for zi in zf.infolist():
if not zi.filename.endswith(".py"):
continue
with zf.open(zi) as handle:
raw_code = handle.read()
with zf.open(zi.filename + ".debug_pkl") as handle:
raw_debug = handle.read()
# Parse debug info and add begin/end markers if not present
# to ensure that we cover the entire source code.
debug_info_t = pickle.loads(raw_debug)
text_table = None
if (len(debug_info_t) == 3 and
isinstance(debug_info_t[0], str) and
debug_info_t[0] == 'FORMAT_WITH_STRING_TABLE'):
_, text_table, content = debug_info_t
def parse_new_format(line):
# (0, (('', '', 0), 0, 0))
num, ((text_indexes, fname_idx, offset), start, end), tag = line
text = ''.join(text_table[x] for x in text_indexes) # type: ignore[index]
fname = text_table[fname_idx] # type: ignore[index]
return num, ((text, fname, offset), start, end), tag
debug_info_t = map(parse_new_format, content)
debug_info = list(debug_info_t)
if not debug_info:
debug_info.append((0, (('', '', 0), 0, 0)))
if debug_info[-1][0] != len(raw_code):
debug_info.append((len(raw_code), (('', '', 0), 0, 0)))
code_parts = []
for di, di_next in zip(debug_info, debug_info[1:]):
start, source_range, *_ = di
end = di_next[0]
assert end > start
source, s_start, s_end = source_range
s_text, s_file, s_line = source
# TODO: Handle this case better. TorchScript ranges are in bytes,
# but JS doesn't really handle byte strings.
# if bytes and chars are not equivalent for this string,
# zero out the ranges so we don't highlight the wrong thing.
if len(s_text) != len(s_text.encode("utf-8")):
s_start = 0
s_end = 0
text = raw_code[start:end]
code_parts.append([text.decode("utf-8"), ist(s_file), s_line, ist(s_text), s_start, s_end])
code_files[zi.filename] = code_parts
extra_files_json_pattern = re.compile(re.escape(path_prefix) + "/extra/.*\\.json")
extra_files_jsons = {}
for zi in zf.infolist():
if not extra_files_json_pattern.fullmatch(zi.filename):
continue
if zi.file_size > extra_file_size_limit:
continue
with zf.open(zi) as handle:
try:
json_content = json.load(handle)
extra_files_jsons[zi.filename] = json_content
except json.JSONDecodeError:
extra_files_jsons[zi.filename] = "INVALID JSON"
always_render_pickles = {
"bytecode.pkl",
}
extra_pickles = {}
for zi in zf.infolist():
if not zi.filename.endswith(".pkl"):
continue
with zf.open(zi) as handle:
# TODO: handle errors here and just ignore the file?
# NOTE: For a lot of these files (like bytecode),
# we could get away with just unpickling, but this should be safer.
obj = torch.utils.show_pickle.DumpUnpickler(handle, catch_invalid_utf8=True).load()
buf = io.StringIO()
pprint.pprint(obj, buf)
contents = buf.getvalue()
# Check the rendered length instead of the file size
# because pickles with shared structure can explode in size during rendering.
if os.path.basename(zi.filename) not in always_render_pickles and \
len(contents) > extra_file_size_limit:
continue
extra_pickles[zi.filename] = contents
return {"model": dict(
title=title,
file_size=file_size,
version=version,
zip_files=zip_files,
interned_strings=list(interned_strings),
code_files=code_files,
model_data=model_data,
constants=constants,
extra_files_jsons=extra_files_jsons,
extra_pickles=extra_pickles,
)}
def get_inline_skeleton():
"""Get a fully-inlined skeleton of the frontend.
The returned HTML page has no external network dependencies for code.
It can load model_info.json over HTTP, or be passed to burn_in_info.
"""
if sys.version_info < (3, 7):
raise Exception("get_inline_skeleton requires Python 3.7")
import importlib.resources
skeleton = importlib.resources.read_text(__package__, "skeleton.html")
js_code = importlib.resources.read_text(__package__, "code.js")
for js_module in ["preact", "htm"]:
js_lib = importlib.resources.read_binary(__package__, f"{js_module}.mjs")
js_url = "data:application/javascript," + urllib.parse.quote(js_lib)
js_code = js_code.replace(f"https://unpkg.com/{js_module}?module", js_url)
skeleton = skeleton.replace(' src="./code.js">', ">\n" + js_code)
return skeleton
def burn_in_info(skeleton, info):
"""Burn model info into the HTML skeleton.
The result will render the hard-coded model info and
have no external network dependencies for code or data.
"""
# Note that Python's json serializer does not escape slashes in strings.
# Since we're inlining this JSON directly into a script tag, a string
# containing "</script>" would end the script prematurely and
# mess up our page. Unconditionally escaping fixes that.
return skeleton.replace(
"BURNED_IN_MODEL_INFO = null",
"BURNED_IN_MODEL_INFO = " + json.dumps(info, sort_keys=True).replace("/", "\\/"))
def get_info_and_burn_skeleton(path_or_bytesio, **kwargs):
model_info = get_model_info(path_or_bytesio, **kwargs)
skeleton = get_inline_skeleton()
page = burn_in_info(skeleton, model_info)
return page
def main(argv, *, stdout=None):
parser = argparse.ArgumentParser()
parser.add_argument("--style", choices=["json", "html"])
parser.add_argument("--title")
parser.add_argument("model")
args = parser.parse_args(argv[1:])
info = get_model_info(args.model, title=args.title)
output = stdout or sys.stdout
if args.style == "json":
output.write(json.dumps(info, sort_keys=True) + "\n")
elif args.style == "html":
skeleton = get_inline_skeleton()
page = burn_in_info(skeleton, info)
output.write(page)
else:
raise Exception("Invalid style")
|
pytorch-master
|
torch/utils/model_dump/__init__.py
|
#!/usr/bin/env python3
import sys
from . import main
sys.exit(main(sys.argv))
|
pytorch-master
|
torch/utils/model_dump/__main__.py
|
import warnings
from typing import Any, List, Optional, Set
import torch
import torch.utils.data.datapipes as dp
from torch.utils.data.graph import DataPipe, DataPipeGraph, traverse
__all__ = [
"apply_sharding",
"apply_shuffle_seed",
"apply_shuffle_settings",
"get_all_graph_pipes",
]
def get_all_graph_pipes(graph: DataPipeGraph) -> List[DataPipe]:
return _get_all_graph_pipes_helper(graph, set())
def _get_all_graph_pipes_helper(graph: DataPipeGraph, id_cache: Set[int]) -> List[DataPipe]:
results: List[DataPipe] = []
for dp_id, (datapipe, sub_graph) in graph.items():
if dp_id in id_cache:
continue
id_cache.add(dp_id)
results.append(datapipe)
results.extend(_get_all_graph_pipes_helper(sub_graph, id_cache))
return results
def apply_sharding(datapipe: DataPipe, num_of_instances: int, instance_id: int) -> DataPipe:
graph = traverse(datapipe, only_datapipe=True)
all_pipes = get_all_graph_pipes(graph)
already_applied_to = None
for pipe in all_pipes:
if hasattr(pipe, 'is_shardable'):
if pipe.is_shardable():
if hasattr(pipe, 'apply_sharding'):
if already_applied_to is not None:
raise RuntimeError('This implementation of sharding can be only applied once per instance of DataPipeline.',
'Already applied to', already_applied_to, 'while trying to apply to', pipe)
pipe.apply_sharding(num_of_instances, instance_id)
already_applied_to = pipe
return datapipe
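# Usage sketch (not part of the original module), assuming the built-in
# IterableWrapper and ShardingFilter datapipes: the sharding_filter advertises
# is_shardable(), so apply_sharding splits the stream across instances.
def _example_apply_sharding():
    pipe = dp.iter.IterableWrapper(range(10)).sharding_filter()
    apply_sharding(pipe, num_of_instances=2, instance_id=0)
    assert list(pipe) == [0, 2, 4, 6, 8]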
def apply_shuffle_settings(datapipe: DataPipe, shuffle: Optional[bool]) -> DataPipe:
if shuffle is None:
return datapipe
graph = traverse(datapipe, only_datapipe=True)
all_pipes = get_all_graph_pipes(graph)
shufflers = [pipe for pipe in all_pipes if isinstance(pipe, (dp.iter.Shuffler, dp.map.Shuffler))]
if not shufflers and shuffle:
warnings.warn(
"`shuffle=True` was set, but the datapipe does not contain a `Shuffler`. Adding one at the end. "
"Be aware that the default buffer size might not be sufficient for your task."
)
datapipe = datapipe.shuffle()
shufflers = [datapipe, ] # type: ignore[list-item]
for shuffler in shufflers:
shuffler.set_shuffle(shuffle)
return datapipe
def apply_shuffle_seed(datapipe: DataPipe, rng: Any) -> DataPipe:
graph = traverse(datapipe, only_datapipe=True)
all_pipes = get_all_graph_pipes(graph)
shufflers = {pipe for pipe in all_pipes if isinstance(pipe, (dp.iter.Shuffler, dp.map.Shuffler))}
for shuffler in shufflers:
shuffle_seed = int(torch.empty((), dtype=torch.int64).random_(generator=rng).item())
shuffler.set_seed(shuffle_seed)
return datapipe
|
pytorch-master
|
torch/utils/data/graph_settings.py
|
import io
import pickle
from torch.utils.data import IterDataPipe, MapDataPipe
from torch.utils.data._utils.serialization import DILL_AVAILABLE
from typing import Dict, List, Set, Tuple, Type, Union
__all__ = ["traverse"]
DataPipe = Union[IterDataPipe, MapDataPipe]
DataPipeGraph = Dict[int, Tuple[DataPipe, "DataPipeGraph"]] # type: ignore[misc]
reduce_ex_hook = None
def _stub_unpickler():
return "STUB"
# TODO(VitalyFedyunin): Make sure it works without dill module installed
def _list_connected_datapipes(scan_obj: DataPipe, only_datapipe: bool, cache: Set[int]) -> List[DataPipe]:
f = io.BytesIO()
p = pickle.Pickler(f) # Not going to work for lambdas, but dill infinite loops on typing and can't be used as is
if DILL_AVAILABLE:
from dill import Pickler as dill_Pickler
d = dill_Pickler(f)
else:
d = None
def stub_pickler(obj):
return _stub_unpickler, ()
captured_connections = []
def getstate_hook(obj):
state = {}
for k, v in obj.__dict__.items():
if isinstance(v, (IterDataPipe, MapDataPipe, tuple)):
state[k] = v
return state
def reduce_hook(obj):
if obj == scan_obj or id(obj) in cache:
raise NotImplementedError
else:
captured_connections.append(obj)
cache.add(id(obj))
return _stub_unpickler, ()
datapipe_classes: Tuple[Type[DataPipe]] = (IterDataPipe, MapDataPipe) # type: ignore[assignment]
try:
for cls in datapipe_classes:
cls.set_reduce_ex_hook(reduce_hook)
if only_datapipe:
cls.set_getstate_hook(getstate_hook)
try:
p.dump(scan_obj)
except (pickle.PickleError, AttributeError, TypeError):
if DILL_AVAILABLE:
d.dump(scan_obj)
else:
raise
finally:
for cls in datapipe_classes:
cls.set_reduce_ex_hook(None)
if only_datapipe:
cls.set_getstate_hook(None)
if DILL_AVAILABLE:
from dill import extend as dill_extend
dill_extend(False) # Undo change to dispatch table
return captured_connections
def traverse(datapipe: DataPipe, only_datapipe: bool = False) -> DataPipeGraph:
cache: Set[int] = set()
return _traverse_helper(datapipe, only_datapipe, cache)
# Add cache here to prevent infinite recursion on DataPipe
def _traverse_helper(datapipe: DataPipe, only_datapipe: bool, cache: Set[int]) -> DataPipeGraph:
if not isinstance(datapipe, (IterDataPipe, MapDataPipe)):
raise RuntimeError("Expected `IterDataPipe` or `MapDataPipe`, but {} is found".format(type(datapipe)))
dp_id = id(datapipe)
if dp_id in cache:
return {}
cache.add(dp_id)
items = _list_connected_datapipes(datapipe, only_datapipe, cache.copy())
d: DataPipeGraph = {dp_id: (datapipe, {})}
for item in items:
# Using cache.copy() here is to prevent recursion on a single path rather than global graph
# Single DataPipe can present multiple times in different paths in graph
d[dp_id][1].update(_traverse_helper(item, only_datapipe, cache.copy()))
return d
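# Illustrative sketch (not part of the original module): traversing a two-stage
# pipeline yields a nested {id: (datapipe, sub_graph)} mapping with the source at
# the leaf. IterableWrapper is assumed to be available from the built-in datapipes.
def _example_traverse():
    from torch.utils.data.datapipes.iter import IterableWrapper

    def _double(x):
        return x * 2

    source = IterableWrapper(range(4))
    mapped = source.map(_double)
    graph = traverse(mapped, only_datapipe=True)
    assert set(graph) == {id(mapped)}
    _, sub_graph = graph[id(mapped)]
    assert set(sub_graph) == {id(source)}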
|
pytorch-master
|
torch/utils/data/graph.py
|
# TODO(VitalyFedyunin): Rearranging this imports leads to crash,
# need to cleanup dependencies and fix it
from torch.utils.data.sampler import (
BatchSampler,
RandomSampler,
Sampler,
SequentialSampler,
SubsetRandomSampler,
WeightedRandomSampler,
)
from torch.utils.data.dataset import (
ChainDataset,
ConcatDataset,
Dataset,
IterableDataset,
Subset,
TensorDataset,
random_split,
)
from torch.utils.data.datapipes.datapipe import (
DFIterDataPipe,
DataChunk,
IterDataPipe,
MapDataPipe,
)
from torch.utils.data.dataloader import (
DataLoader,
_DatasetKind,
get_worker_info,
default_collate,
default_convert,
)
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.datapipes._decorator import (
argument_validation,
functional_datapipe,
guaranteed_datapipes_determinism,
non_deterministic,
runtime_validation,
runtime_validation_disabled,
)
from torch.utils.data.dataloader_experimental import DataLoader2
from torch.utils.data import communication
__all__ = ['BatchSampler',
'ChainDataset',
'ConcatDataset',
'DFIterDataPipe',
'DataChunk',
'DataLoader',
'DataLoader2',
'Dataset',
'DistributedSampler',
'IterDataPipe',
'IterableDataset',
'MapDataPipe',
'RandomSampler',
'Sampler',
'SequentialSampler',
'Subset',
'SubsetRandomSampler',
'TensorDataset',
'WeightedRandomSampler',
'_DatasetKind',
'argument_validation',
'communication',
'default_collate',
'default_convert',
'functional_datapipe',
'get_worker_info',
'guaranteed_datapipes_determinism',
'non_deterministic',
'random_split',
'runtime_validation',
'runtime_validation_disabled']
# Please keep this list sorted
assert __all__ == sorted(__all__)
|
pytorch-master
|
torch/utils/data/__init__.py
|
import bisect
import warnings
import math
from typing import (
Generic,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union
)
# No 'default_generator' in torch/__init__.pyi
from torch import default_generator, randperm
from torch._utils import _accumulate
from ... import Generator, Tensor
__all__ = [
"Dataset",
"IterableDataset",
"TensorDataset",
"ConcatDataset",
"ChainDataset",
"Subset",
"random_split",
]
T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
class Dataset(Generic[T_co]):
r"""An abstract class representing a :class:`Dataset`.
All datasets that represent a map from keys to data samples should subclass
it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a
data sample for a given key. Subclasses could also optionally overwrite
:meth:`__len__`, which is expected to return the size of the dataset by many
:class:`~torch.utils.data.Sampler` implementations and the default options
of :class:`~torch.utils.data.DataLoader`.
.. note::
:class:`~torch.utils.data.DataLoader` by default constructs an index
sampler that yields integral indices. To make it work with a map-style
dataset with non-integral indices/keys, a custom sampler must be provided.
"""
def __getitem__(self, index) -> T_co:
raise NotImplementedError
def __add__(self, other: 'Dataset[T_co]') -> 'ConcatDataset[T_co]':
return ConcatDataset([self, other])
# No `def __len__(self)` default?
# See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
# in pytorch/torch/utils/data/sampler.py
class IterableDataset(Dataset[T_co]):
r"""An iterable Dataset.
All datasets that represent an iterable of data samples should subclass it.
This form of dataset is particularly useful when data comes from a stream.
All subclasses should overwrite :meth:`__iter__`, which would return an
iterator of samples in this dataset.
When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader`
iterator. When :attr:`num_workers > 0`, each worker process will have a
different copy of the dataset object, so it is often desired to configure
each copy independently to avoid having duplicate data returned from the
workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
process, returns information about the worker. It can be used in either the
dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
:attr:`worker_init_fn` option to modify each copy's behavior.
Example 1: splitting workload across all workers in :meth:`__iter__`::
>>> class MyIterableDataset(torch.utils.data.IterableDataset):
... def __init__(self, start, end):
... super().__init__()
... assert end > start, "this example code only works with end > start"
... self.start = start
... self.end = end
...
... def __iter__(self):
... worker_info = torch.utils.data.get_worker_info()
... if worker_info is None: # single-process data loading, return the full iterator
... iter_start = self.start
... iter_end = self.end
... else: # in a worker process
... # split workload
... per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
... worker_id = worker_info.id
... iter_start = self.start + worker_id * per_worker
... iter_end = min(iter_start + per_worker, self.end)
... return iter(range(iter_start, iter_end))
...
>>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
>>> ds = MyIterableDataset(start=3, end=7)
>>> # Single-process loading
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
[tensor([3]), tensor([4]), tensor([5]), tensor([6])]
>>> # Multi-process loading with two worker processes
>>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6].
>>> # xdoctest: +IGNORE_WANT("non deterministic")
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
[tensor([3]), tensor([5]), tensor([4]), tensor([6])]
>>> # With even more workers
>>> # xdoctest: +IGNORE_WANT("non deterministic")
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=20)))
[tensor([3]), tensor([5]), tensor([4]), tensor([6])]
Example 2: splitting workload across all workers using :attr:`worker_init_fn`::
>>> class MyIterableDataset(torch.utils.data.IterableDataset):
... def __init__(self, start, end):
... super().__init__()
... assert end > start, "this example code only works with end > start"
... self.start = start
... self.end = end
...
... def __iter__(self):
... return iter(range(self.start, self.end))
...
>>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
>>> ds = MyIterableDataset(start=3, end=7)
>>> # Single-process loading
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
[3, 4, 5, 6]
>>>
>>> # Directly doing multi-process loading yields duplicate data
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
[3, 3, 4, 4, 5, 5, 6, 6]
>>> # Define a `worker_init_fn` that configures each dataset copy differently
>>> def worker_init_fn(worker_id):
... worker_info = torch.utils.data.get_worker_info()
... dataset = worker_info.dataset # the dataset copy in this worker process
... overall_start = dataset.start
... overall_end = dataset.end
... # configure the dataset to only process the split workload
... per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers)))
... worker_id = worker_info.id
... dataset.start = overall_start + worker_id * per_worker
... dataset.end = min(dataset.start + per_worker, overall_end)
...
>>> # Multi-process loading with the custom `worker_init_fn`
>>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6].
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn)))
[3, 5, 4, 6]
>>> # With even more workers
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=20, worker_init_fn=worker_init_fn)))
[3, 4, 5, 6]
"""
def __iter__(self) -> Iterator[T_co]:
raise NotImplementedError
def __add__(self, other: Dataset[T_co]):
return ChainDataset([self, other])
# No `def __len__(self)` default? Subclasses raise `TypeError` when needed.
# See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
class TensorDataset(Dataset[Tuple[Tensor, ...]]):
r"""Dataset wrapping tensors.
Each sample will be retrieved by indexing tensors along the first dimension.
Args:
*tensors (Tensor): tensors that have the same size of the first dimension.
"""
tensors: Tuple[Tensor, ...]
def __init__(self, *tensors: Tensor) -> None:
assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors), "Size mismatch between tensors"
self.tensors = tensors
def __getitem__(self, index):
return tuple(tensor[index] for tensor in self.tensors)
def __len__(self):
return self.tensors[0].size(0)
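# Editor's illustrative sketch (not part of the original module): pairing features
# with labels in a TensorDataset; all tensors must share the same first dimension.
def _example_tensor_dataset():
    import torch

    features = torch.randn(8, 3)           # 8 samples, 3 features each
    labels = torch.randint(0, 2, (8,))     # 8 matching labels
    ds = TensorDataset(features, labels)
    x, y = ds[0]                           # one element per wrapped tensor
    assert len(ds) == 8 and x.shape == (3,) and y.dim() == 0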
class ConcatDataset(Dataset[T_co]):
r"""Dataset as a concatenation of multiple datasets.
This class is useful to assemble different existing datasets.
Args:
datasets (sequence): List of datasets to be concatenated
"""
datasets: List[Dataset[T_co]]
cumulative_sizes: List[int]
@staticmethod
def cumsum(sequence):
r, s = [], 0
for e in sequence:
l = len(e)
r.append(l + s)
s += l
return r
def __init__(self, datasets: Iterable[Dataset]) -> None:
super(ConcatDataset, self).__init__()
self.datasets = list(datasets)
assert len(self.datasets) > 0, 'datasets should not be an empty iterable' # type: ignore[arg-type]
for d in self.datasets:
assert not isinstance(d, IterableDataset), "ConcatDataset does not support IterableDataset"
self.cumulative_sizes = self.cumsum(self.datasets)
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx][sample_idx]
@property
def cummulative_sizes(self):
warnings.warn("cummulative_sizes attribute is renamed to "
"cumulative_sizes", DeprecationWarning, stacklevel=2)
return self.cumulative_sizes
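# Editor's illustrative sketch (not part of the original module): how ConcatDataset
# maps a global index to a (dataset, local index) pair via cumulative sizes and bisect.
def _example_concat_dataset():
    import torch

    a = TensorDataset(torch.arange(3))     # global indices 0..2
    b = TensorDataset(torch.arange(5))     # global indices 3..7
    ds = ConcatDataset([a, b])
    assert len(ds) == 8
    assert ds.cumulative_sizes == [3, 8]
    # Global index 4 lands in `b` at local index 4 - 3 = 1.
    assert ds[4][0].item() == 1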
class ChainDataset(IterableDataset):
r"""Dataset for chaining multiple :class:`IterableDataset` s.
This class is useful to assemble different existing dataset streams. The
chaining operation is done on-the-fly, so concatenating large-scale
datasets with this class will be efficient.
Args:
datasets (iterable of IterableDataset): datasets to be chained together
"""
def __init__(self, datasets: Iterable[Dataset]) -> None:
super(ChainDataset, self).__init__()
self.datasets = datasets
def __iter__(self):
for d in self.datasets:
assert isinstance(d, IterableDataset), "ChainDataset only supports IterableDataset"
for x in d:
yield x
def __len__(self):
total = 0
for d in self.datasets:
assert isinstance(d, IterableDataset), "ChainDataset only supports IterableDataset"
total += len(d) # type: ignore[arg-type]
return total
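# Editor's illustrative sketch (not part of the original module): chaining two small
# iterable datasets; `_RangeIterable` is a hypothetical helper defined only here.
def _example_chain_dataset():
    class _RangeIterable(IterableDataset):
        def __init__(self, start, end):
            self.start, self.end = start, end

        def __iter__(self):
            return iter(range(self.start, self.end))

        def __len__(self):
            return self.end - self.start

    chained = ChainDataset([_RangeIterable(0, 3), _RangeIterable(3, 5)])
    assert list(chained) == [0, 1, 2, 3, 4]
    assert len(chained) == 5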
class Subset(Dataset[T_co]):
r"""
Subset of a dataset at specified indices.
Args:
dataset (Dataset): The whole Dataset
indices (sequence): Indices in the whole set selected for subset
"""
dataset: Dataset[T_co]
indices: Sequence[int]
def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None:
self.dataset = dataset
self.indices = indices
def __getitem__(self, idx):
if isinstance(idx, list):
return self.dataset[[self.indices[i] for i in idx]]
return self.dataset[self.indices[idx]]
def __len__(self):
return len(self.indices)
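# Editor's illustrative sketch (not part of the original module): selecting a fixed
# subset of a map-style dataset by index.
def _example_subset():
    import torch

    base = TensorDataset(torch.arange(10))
    evens = Subset(base, indices=[0, 2, 4, 6, 8])
    assert len(evens) == 5
    assert evens[1][0].item() == 2         # subset index 1 -> base index 2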
def random_split(dataset: Dataset[T], lengths: Sequence[Union[int, float]],
generator: Optional[Generator] = default_generator) -> List[Subset[T]]:
r"""
Randomly split a dataset into non-overlapping new datasets of given lengths.
If a list of fractions that sum up to 1 is given,
the lengths will be computed automatically as
floor(frac * len(dataset)) for each fraction provided.
After computing the lengths, if there are any remainders, 1 count will be
distributed in round-robin fashion to the lengths
until there are no remainders left.
Optionally fix the generator for reproducible results, e.g.:
>>> random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(42))
>>> random_split(range(30), [0.3, 0.3, 0.4], generator=torch.Generator(
... ).manual_seed(42))
Args:
dataset (Dataset): Dataset to be split
lengths (sequence): lengths or fractions of splits to be produced
generator (Generator): Generator used for the random permutation.
"""
if math.isclose(sum(lengths), 1) and sum(lengths) <= 1:
subset_lengths: List[int] = []
for i, frac in enumerate(lengths):
if frac < 0 or frac > 1:
raise ValueError(f"Fraction at index {i} is not between 0 and 1")
n_items_in_split = int(
math.floor(len(dataset) * frac) # type: ignore[arg-type]
)
subset_lengths.append(n_items_in_split)
remainder = len(dataset) - sum(subset_lengths) # type: ignore[arg-type]
# add 1 to all the lengths in round-robin fashion until the remainder is 0
for i in range(remainder):
idx_to_add_at = i % len(subset_lengths)
subset_lengths[idx_to_add_at] += 1
lengths = subset_lengths
for i, length in enumerate(lengths):
if length == 0:
warnings.warn(f"Length of split at index {i} is 0. "
f"This might result in an empty dataset.")
# Cannot verify that dataset is Sized
if sum(lengths) != len(dataset): # type: ignore[arg-type]
raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
indices = randperm(sum(lengths), generator=generator).tolist() # type: ignore[call-overload]
return [Subset(dataset, indices[offset - length : offset]) for offset, length in zip(_accumulate(lengths), lengths)]
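# Editor's worked example (not part of the original module) of the fraction handling
# above: for a dataset of length 10 and fractions [0.5, 0.25, 0.25], the floors are
# [5, 2, 2] (sum 9), so the remainder of 1 is assigned round-robin, giving [6, 2, 2].
def _example_random_split_fractions():
    import torch

    ds = TensorDataset(torch.arange(10))
    parts = random_split(ds, [0.5, 0.25, 0.25], generator=Generator().manual_seed(0))
    assert [len(p) for p in parts] == [6, 2, 2]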
|
pytorch-master
|
torch/utils/data/dataset.py
|
import math
from typing import TypeVar, Optional, Iterator
import torch
from . import Sampler, Dataset
import torch.distributed as dist
__all__ = ["DistributedSampler", ]
T_co = TypeVar('T_co', covariant=True)
class DistributedSampler(Sampler[T_co]):
r"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
process can pass a :class:`~torch.utils.data.DistributedSampler` instance as a
:class:`~torch.utils.data.DataLoader` sampler, and load a subset of the
original dataset that is exclusive to it.
.. note::
The dataset is assumed to be of constant size, and any instance of it is assumed to
always return the same elements in the same order.
Args:
dataset: Dataset used for sampling.
num_replicas (int, optional): Number of processes participating in
distributed training. By default, :attr:`world_size` is retrieved from the
current distributed group.
rank (int, optional): Rank of the current process within :attr:`num_replicas`.
By default, :attr:`rank` is retrieved from the current distributed
group.
shuffle (bool, optional): If ``True`` (default), sampler will shuffle the
indices.
seed (int, optional): random seed used to shuffle the sampler if
:attr:`shuffle=True`. This number should be identical across all
processes in the distributed group. Default: ``0``.
drop_last (bool, optional): if ``True``, then the sampler will drop the
tail of the data to make it evenly divisible across the number of
replicas. If ``False``, the sampler will add extra indices to make
the data evenly divisible across the replicas. Default: ``False``.
.. warning::
In distributed mode, calling the :meth:`set_epoch` method at
the beginning of each epoch **before** creating the :class:`DataLoader` iterator
is necessary to make shuffling work properly across multiple epochs. Otherwise,
the same ordering will be always used.
Example::
>>> # xdoctest: +SKIP
>>> sampler = DistributedSampler(dataset) if is_distributed else None
>>> loader = DataLoader(dataset, shuffle=(sampler is None),
... sampler=sampler)
>>> for epoch in range(start_epoch, n_epochs):
... if is_distributed:
... sampler.set_epoch(epoch)
... train(loader)
"""
def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None,
rank: Optional[int] = None, shuffle: bool = True,
seed: int = 0, drop_last: bool = False) -> None:
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
if rank >= num_replicas or rank < 0:
raise ValueError(
"Invalid rank {}, rank should be in the interval"
" [0, {}]".format(rank, num_replicas - 1))
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.drop_last = drop_last
# If the dataset length is evenly divisible by # of replicas, then there
# is no need to drop any data, since the dataset will be split equally.
if self.drop_last and len(self.dataset) % self.num_replicas != 0: # type: ignore[arg-type]
# Split to nearest available length that is evenly divisible.
# This is to ensure each rank receives the same amount of data when
# using this Sampler.
self.num_samples = math.ceil(
(len(self.dataset) - self.num_replicas) / self.num_replicas # type: ignore[arg-type]
)
else:
self.num_samples = math.ceil(len(self.dataset) / self.num_replicas) # type: ignore[arg-type]
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.seed = seed
def __iter__(self) -> Iterator[T_co]:
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type]
else:
indices = list(range(len(self.dataset))) # type: ignore[arg-type]
if not self.drop_last:
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size <= len(indices):
indices += indices[:padding_size]
else:
indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
else:
# remove tail of data to make it evenly divisible.
indices = indices[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self) -> int:
return self.num_samples
def set_epoch(self, epoch: int) -> None:
r"""
Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
use a different random ordering for each epoch. Otherwise, the next iteration of this
sampler will yield the same ordering.
Args:
epoch (int): Epoch number.
"""
self.epoch = epoch
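# Editor's illustrative sketch (not part of the original module): the padding and
# strided subsampling arithmetic from `__iter__` above, reproduced standalone so it
# runs without initializing torch.distributed. With 8 samples and 3 replicas
# (drop_last=False), total_size is 9 and rank r keeps indices[r::3].
def _example_subsampling_arithmetic():
    indices = list(range(8))                                # as if shuffle were off
    num_replicas = 3
    num_samples = math.ceil(len(indices) / num_replicas)    # 3
    total_size = num_samples * num_replicas                 # 9
    indices += indices[: total_size - len(indices)]         # pad -> [0..7, 0]
    per_rank = [indices[rank:total_size:num_replicas] for rank in range(num_replicas)]
    assert per_rank == [[0, 3, 6], [1, 4, 7], [2, 5, 0]]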
|
pytorch-master
|
torch/utils/data/distributed.py
|
import time
from typing import Any, List
import torch.utils.data.backward_compatibility
import torch.utils.data.graph_settings
from torch.utils.data import DataLoader, IterDataPipe, communication
from torch.utils.data.datapipes.iter import IterableWrapper
__all__ = [
"DataLoader2",
]
class _ThreadingDataLoader2:
def __init__(self, datapipe, num_workers=0, collate_fn=None):
self.threads = []
self.datapipes = []
self.collate_fn = collate_fn
for worker_id in range(num_workers):
(thread, req_queue, res_queue, thread_localdatapipe) = communication.eventloop.SpawnThreadForDataPipeline(datapipe)
torch.utils.data.graph_settings.apply_sharding(thread_localdatapipe, num_workers, worker_id)
thread.start()
self.threads.append((thread, req_queue, res_queue)) # These queues are independent
local_datapipe = communication.iter.QueueWrapper(
communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue))
self.datapipes.append(local_datapipe)
def __iter__(self):
not_available = False
forever = True
exclude_datapipes: List[Any] = []
while len(exclude_datapipes) < len(self.datapipes):
for dp in self.datapipes:
if dp not in exclude_datapipes:
try:
value = dp.nonblocking_next()
yield value
except StopIteration:
exclude_datapipes.append(dp)
except communication.iter.NotAvailable:
not_available = True
if not_available:
time.sleep(0.001)
def __del__(self):
self._cleanup_all_threads()
def _cleanup_all_threads(self):
def clean_me(thread, req_queue, res_queue):
req_queue.put(communication.messages.TerminateRequest())
_ = res_queue.get()
thread.join()
for thread, req_queue, res_queue in self.threads:
clean_me(thread, req_queue, res_queue)
class DataLoader2:
def __new__(cls,
dataset,
batch_size=1,
shuffle=None,
sampler=None,
batch_sampler=None,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None,
*,
prefetch_factor=2,
persistent_workers=False,
batch_outside_worker=False,
parallelism_mode='mp'):
if isinstance(dataset, IterDataPipe):
data_loader: Any = None
if batch_sampler is not None:
raise Exception(
'batch_sampler is not yet supported by DataPipes')
if sampler is not None:
raise Exception(
'sampler is not yet supported by DataPipes')
datapipe = dataset
datapipe = torch.utils.data.graph_settings.apply_shuffle_settings(datapipe, shuffle=shuffle) # type: ignore[assignment]
if batch_outside_worker and pin_memory:
raise Exception(
'pin_memory is not yet compatible with batch_outside_worker')
if not batch_outside_worker:
if batch_size is not None:
datapipe = datapipe.batch(batch_size, drop_last=drop_last)
if collate_fn is None:
collate_fn = torch.utils.data._utils.collate.default_collate
# Note: It is safe to pass shuffle=True to the old DataLoader, as shuffle does nothing
# for iterable-style datasets, but it is required to set up the Pipes correctly.
data_loader = DataLoader(datapipe,
batch_size=None, # Replaced by .batch DataPipe
shuffle=shuffle,
sampler=None,
batch_sampler=None,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory,
drop_last=False, # Replaced by .batch DataPipe
timeout=timeout,
worker_init_fn=worker_init_fn,
prefetch_factor=prefetch_factor,
persistent_workers=persistent_workers)
elif parallelism_mode == 'thread':
if collate_fn is not None and not batch_outside_worker:
datapipe = datapipe.map(collate_fn)
if pin_memory:
raise Exception(
'pin_memory is not yet supported by DataPipes with Threading')
if worker_init_fn is not None:
raise Exception(
'worker_init_fn is not yet supported by DataPipes with Threading')
data_loader = _ThreadingDataLoader2(datapipe,
num_workers=num_workers,
collate_fn=collate_fn)
else:
raise Exception('Unsupported parallelism mode', parallelism_mode)
if not batch_outside_worker:
return data_loader
else:
if collate_fn is None:
collate_fn = torch.utils.data._utils.collate.default_collate
datapipe = IterableWrapper(data_loader).batch(
batch_size, drop_last=drop_last).map(collate_fn)
return datapipe
else:
if parallelism_mode == 'thread':
raise Exception(
'thread parallelism mode is not supported for old DataSets')
return DataLoader(dataset,
batch_size=batch_size,
shuffle=shuffle,
sampler=sampler,
batch_sampler=batch_sampler,
num_workers=num_workers,
collate_fn=collate_fn,
pin_memory=pin_memory,
drop_last=drop_last,
timeout=timeout,
worker_init_fn=worker_init_fn,
prefetch_factor=prefetch_factor,
persistent_workers=persistent_workers)
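# Editor's illustrative sketch (not part of the original module): a minimal
# DataLoader2 call over an IterDataPipe. num_workers=0 keeps the example on the
# single-process code path; the batch count below follows from 10 samples with
# batch_size=4 and drop_last=False.
def _example_dataloader2_usage():
    dp = IterableWrapper(range(10))
    loader = DataLoader2(dp, batch_size=4, shuffle=False, num_workers=0)
    batches = list(loader)
    assert len(batches) == 3     # 4 + 4 + 2 samples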
|
pytorch-master
|
torch/utils/data/dataloader_experimental.py
|
import warnings
def worker_init_fn(worker_id):
warnings.warn("Usage of backward_compatibility.worker_init_fn is deprecated"
" as DataLoader automatically applies sharding in every worker")
|
pytorch-master
|
torch/utils/data/backward_compatibility.py
|
r"""Definition of the DataLoader and associated iterators that subclass _BaseDataLoaderIter
To support these two classes, in `./_utils` we define many utility methods and
functions to be run in multiprocessing. E.g., the data loading worker loop is
in `./_utils/worker.py`.
"""
import functools
import itertools
import logging
import os
import queue
import threading
import time
import warnings
from datetime import timedelta
from typing import Any, Callable, Iterable, TypeVar, Generic, Sequence, List, Optional, Union
import multiprocessing as python_multiprocessing
import torch
import torch.distributed as dist
import torch.multiprocessing as multiprocessing
import torch.utils.data.graph_settings
from torch._utils import ExceptionWrapper
from torch._six import string_classes
from . import (
IterDataPipe,
MapDataPipe,
IterableDataset,
Sampler,
SequentialSampler,
RandomSampler,
BatchSampler,
Dataset,)
from torch.utils.data.datapipes.datapipe import _IterDataPipeSerializationWrapper, _MapDataPipeSerializationWrapper
from . import _utils
__all__ = [
"DataLoader",
"get_worker_info",
"default_collate",
"default_convert",
]
T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
_worker_init_fn_t = Callable[[int], None]
# Ideally we would parameterize `DataLoader` by the return type of `collate_fn`, but there is currently no way to have that
# type parameter set to a default value if the user doesn't pass in a custom 'collate_fn'.
# See https://github.com/python/mypy/issues/3737.
_collate_fn_t = Callable[[List[T]], Any]
# These functions used to be defined in this file. However, it was moved to
# _utils/collate.py. Although it is rather hard to access this from user land
# (one has to explicitly directly `import torch.utils.data.dataloader`), there
# probably is user code out there using it. This aliasing maintains BC in this
# aspect.
default_collate: _collate_fn_t = _utils.collate.default_collate
default_convert = _utils.collate.default_convert
get_worker_info = _utils.worker.get_worker_info
logger = logging.getLogger(__name__)
class _DatasetKind(object):
Map = 0
Iterable = 1
@staticmethod
def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last):
if kind == _DatasetKind.Map:
return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
else:
return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
class _InfiniteConstantSampler(Sampler):
r"""Analogous to ``itertools.repeat(None, None)``.
Used as sampler for :class:`~torch.utils.data.IterableDataset`.
Args:
data_source (Dataset): dataset to sample from
"""
def __init__(self):
super(_InfiniteConstantSampler, self).__init__(None)
def __iter__(self):
while True:
yield None
def _get_distributed_settings():
if dist.is_available() and dist.is_initialized():
return dist.get_world_size(), dist.get_rank()
else:
return 1, 0
def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id):
global_worker_id = worker_id
info = torch.utils.data.get_worker_info()
total_workers = info.num_workers
datapipe = info.dataset
# To distribute elements evenly across distributed processes, we shard data across
# distributed processes first and then across worker processes
total_workers *= world_size
global_worker_id = global_worker_id * world_size + rank_id
torch.utils.data.graph_settings.apply_sharding(datapipe, total_workers, global_worker_id)
if worker_init_fn is not None:
worker_init_fn(worker_id)
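# Editor's worked example (not part of the original module) of the sharding ids
# computed above: with world_size=2 distributed ranks and 3 DataLoader workers per
# rank there are 2 * 3 = 6 global shards, and worker w on rank r gets id w * 2 + r,
# so the two ranks end up with disjoint shard ids.
def _example_sharding_ids():
    world_size, num_dl_workers = 2, 3
    total_workers = num_dl_workers * world_size
    ids_per_rank = [
        [w * world_size + r for w in range(num_dl_workers)] for r in range(world_size)
    ]
    assert total_workers == 6
    assert ids_per_rank == [[0, 2, 4], [1, 3, 5]]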
class DataLoader(Generic[T_co]):
r"""
Data loader. Combines a dataset and a sampler, and provides an iterable over
the given dataset.
The :class:`~torch.utils.data.DataLoader` supports both map-style and
iterable-style datasets with single- or multi-process loading, customizing
loading order and optional automatic batching (collation) and memory pinning.
See :py:mod:`torch.utils.data` documentation page for more details.
Args:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: ``1``).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: ``False``).
sampler (Sampler or Iterable, optional): defines the strategy to draw
samples from the dataset. Can be any ``Iterable`` with ``__len__``
implemented. If specified, :attr:`shuffle` must not be specified.
batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but
returns a batch of indices at a time. Mutually exclusive with
:attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`,
and :attr:`drop_last`.
num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
collate_fn (Callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset.
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
into device/CUDA pinned memory before returning them. If your data elements
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
see the example below.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: ``False``)
timeout (numeric, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: ``0``)
worker_init_fn (Callable, optional): If not ``None``, this will be called on each
worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
input, after seeding and before data loading. (default: ``None``)
generator (torch.Generator, optional): If not ``None``, this RNG will be used
by RandomSampler to generate random indexes and multiprocessing to generate
`base_seed` for workers. (default: ``None``)
prefetch_factor (int, optional, keyword-only arg): Number of batches loaded
in advance by each worker. ``2`` means there will be a total of
2 * num_workers batches prefetched across all workers. (default: ``2``)
persistent_workers (bool, optional): If ``True``, the data loader will not shut down
the worker processes after a dataset has been consumed once. This keeps the
workers' `Dataset` instances alive. (default: ``False``)
pin_memory_device (str, optional): the data loader will copy Tensors
into device pinned memory before returning them if pin_memory is set to true.
.. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn`
cannot be an unpicklable object, e.g., a lambda function. See
:ref:`multiprocessing-best-practices` on more details related
to multiprocessing in PyTorch.
.. warning:: ``len(dataloader)`` heuristic is based on the length of the sampler used.
When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`,
it instead returns an estimate based on ``len(dataset) / batch_size``, with proper
rounding depending on :attr:`drop_last`, regardless of multi-process loading
configurations. This represents the best guess PyTorch can make because PyTorch
trusts user :attr:`dataset` code in correctly handling multi-process
loading to avoid duplicate data.
However, if sharding results in multiple workers having incomplete last batches,
this estimate can still be inaccurate, because (1) an otherwise complete batch can
be broken into multiple ones and (2) more than one batch worth of samples can be
dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such
cases in general.
See `Dataset Types`_ for more details on these two types of datasets and how
:class:`~torch.utils.data.IterableDataset` interacts with
`Multi-process data loading`_.
.. warning:: See the :ref:`reproducibility`, :ref:`dataloader-workers-random-seed`, and
:ref:`data-loading-randomness` notes for random-seed-related questions.
"""
dataset: Dataset[T_co]
batch_size: Optional[int]
num_workers: int
pin_memory: bool
drop_last: bool
timeout: float
sampler: Union[Sampler, Iterable]
pin_memory_device: str
prefetch_factor: int
_iterator : Optional['_BaseDataLoaderIter']
__initialized = False
def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1,
shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None,
batch_sampler: Union[Sampler[Sequence], Iterable[Sequence], None] = None,
num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None,
pin_memory: bool = False, drop_last: bool = False,
timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None,
multiprocessing_context=None, generator=None,
*, prefetch_factor: int = 2,
persistent_workers: bool = False,
pin_memory_device: str = ""):
torch._C._log_api_usage_once("python.data_loader")
if num_workers < 0:
raise ValueError('num_workers option should be non-negative; '
'use num_workers=0 to disable multiprocessing.')
if timeout < 0:
raise ValueError('timeout option should be non-negative')
if num_workers == 0 and prefetch_factor != 2:
raise ValueError('prefetch_factor option can only be specified when multiprocessing is used; '
'set num_workers > 0 to enable multiprocessing.')
assert prefetch_factor > 0
if persistent_workers and num_workers == 0:
raise ValueError('persistent_workers option needs num_workers > 0')
self.dataset = dataset
self.num_workers = num_workers
self.prefetch_factor = prefetch_factor
self.pin_memory = pin_memory
self.pin_memory_device = pin_memory_device
self.timeout = timeout
self.worker_init_fn = worker_init_fn
self.multiprocessing_context = multiprocessing_context
# Adds several forward compatibilities so classic DataLoader can work with DataPipes
# 1. _DataPipeSerializationWrapper container makes it easier to serialize without redefining pickler
# 2. Additional worker init function will take care of sharding in MP and Distributed
if isinstance(self.dataset, IterDataPipe):
self.dataset = _IterDataPipeSerializationWrapper(self.dataset)
ws, rank = _get_distributed_settings()
if num_workers > 0:
self.worker_init_fn = functools.partial(
_sharding_worker_init_fn, self.worker_init_fn, ws, rank)
else:
torch.utils.data.graph_settings.apply_sharding(self.dataset, ws, rank)
elif isinstance(self.dataset, MapDataPipe):
self.dataset = _MapDataPipeSerializationWrapper(self.dataset)
ws, rank = _get_distributed_settings()
if num_workers > 0:
self.worker_init_fn = functools.partial(
_sharding_worker_init_fn, self.worker_init_fn, ws, rank)
else:
torch.utils.data.graph_settings.apply_sharding(self.dataset, ws, rank)
# Arg-check dataset related before checking samplers because we want to
# tell users that iterable-style datasets are incompatible with custom
# samplers first, so that they don't learn that this combo doesn't work
# after spending time fixing the custom sampler errors.
if isinstance(dataset, IterableDataset):
self._dataset_kind = _DatasetKind.Iterable
# NOTE [ Custom Samplers and IterableDataset ]
#
# `IterableDataset` does not support custom `batch_sampler` or
# `sampler` since the key is irrelevant (unless we support
# generator-style dataset one day...).
#
# For `sampler`, we always create a dummy sampler. This is an
# infinite sampler even when the dataset may have an implemented
# finite `__len__` because in multi-process data loading, naive
# settings will return duplicated data (which may be desired), and
# thus using a sampler with length matching that of dataset will
# cause data loss (you may have duplicates of the first couple
# batches, but never see anything afterwards). Therefore,
# `IterableDataset` always uses an infinite sampler, an instance of
# `_InfiniteConstantSampler` defined above.
#
# A custom `batch_sampler` essentially only controls the batch size.
# However, it is unclear how useful it would be since an iterable-style
# dataset can handle that within itself. Moreover, it is pointless
# in multi-process data loading as the assignment order of batches
# to workers is an implementation detail so users can not control
# how to batchify each worker's iterable. Thus, we disable this
# option. If this turns out to be useful in future, we can re-enable
# this, and support custom samplers that specify the assignments to
# specific workers.
if isinstance(dataset, IterDataPipe):
if shuffle is not None:
dataset = torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
# We cannot check `shuffle is not None` here, since previously `shuffle=False` was the default.
elif shuffle not in {False, None}:
raise ValueError(
"DataLoader with IterableDataset: expected unspecified "
"shuffle option, but got shuffle={}".format(shuffle))
if sampler is not None:
# See NOTE [ Custom Samplers and IterableDataset ]
raise ValueError(
"DataLoader with IterableDataset: expected unspecified "
"sampler option, but got sampler={}".format(sampler))
elif batch_sampler is not None:
# See NOTE [ Custom Samplers and IterableDataset ]
raise ValueError(
"DataLoader with IterableDataset: expected unspecified "
"batch_sampler option, but got batch_sampler={}".format(batch_sampler))
else:
shuffle = bool(shuffle)
self._dataset_kind = _DatasetKind.Map
if sampler is not None and shuffle:
raise ValueError('sampler option is mutually exclusive with '
'shuffle')
if batch_sampler is not None:
# auto_collation with custom batch_sampler
if batch_size != 1 or shuffle or sampler is not None or drop_last:
raise ValueError('batch_sampler option is mutually exclusive '
'with batch_size, shuffle, sampler, and '
'drop_last')
batch_size = None
drop_last = False
elif batch_size is None:
# no auto_collation
if drop_last:
raise ValueError('batch_size=None option disables auto-batching '
'and is mutually exclusive with drop_last')
if sampler is None: # give default samplers
if self._dataset_kind == _DatasetKind.Iterable:
# See NOTE [ Custom Samplers and IterableDataset ]
sampler = _InfiniteConstantSampler()
else: # map-style
if shuffle:
sampler = RandomSampler(dataset, generator=generator) # type: ignore[arg-type]
else:
sampler = SequentialSampler(dataset) # type: ignore[arg-type]
if batch_size is not None and batch_sampler is None:
# auto_collation without custom batch_sampler
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
self.batch_size = batch_size
self.drop_last = drop_last
self.sampler = sampler
self.batch_sampler = batch_sampler
self.generator = generator
if collate_fn is None:
if self._auto_collation:
collate_fn = _utils.collate.default_collate
else:
collate_fn = _utils.collate.default_convert
self.collate_fn = collate_fn
self.persistent_workers = persistent_workers
self.__initialized = True
self._IterableDataset_len_called = None # See NOTE [ IterableDataset and __len__ ]
self._iterator = None
self.check_worker_number_rationality()
torch.set_vital('Dataloader', 'enabled', 'True') # type: ignore[attr-defined]
def _get_iterator(self) -> '_BaseDataLoaderIter':
if self.num_workers == 0:
return _SingleProcessDataLoaderIter(self)
else:
self.check_worker_number_rationality()
return _MultiProcessingDataLoaderIter(self)
@property
def multiprocessing_context(self):
return self.__multiprocessing_context
@multiprocessing_context.setter
def multiprocessing_context(self, multiprocessing_context):
if multiprocessing_context is not None:
if self.num_workers > 0:
if isinstance(multiprocessing_context, string_classes):
valid_start_methods = multiprocessing.get_all_start_methods()
if multiprocessing_context not in valid_start_methods:
raise ValueError(
('multiprocessing_context option '
'should specify a valid start method in {!r}, but got '
'multiprocessing_context={!r}').format(valid_start_methods, multiprocessing_context))
# error: Argument 1 to "get_context" has incompatible type "Union[str, bytes]"; expected "str" [arg-type]
multiprocessing_context = multiprocessing.get_context(multiprocessing_context) # type: ignore[arg-type]
if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext):
raise TypeError(('multiprocessing_context option should be a valid context '
'object or a string specifying the start method, but got '
'multiprocessing_context={}').format(multiprocessing_context))
else:
raise ValueError(('multiprocessing_context can only be used with '
'multi-process loading (num_workers > 0), but got '
'num_workers={}').format(self.num_workers))
self.__multiprocessing_context = multiprocessing_context
def __setattr__(self, attr, val):
if self.__initialized and attr in (
'batch_size', 'batch_sampler', 'sampler', 'drop_last', 'dataset', 'persistent_workers'):
raise ValueError('{} attribute should not be set after {} is '
'initialized'.format(attr, self.__class__.__name__))
super(DataLoader, self).__setattr__(attr, val)
# We quote '_BaseDataLoaderIter' since it isn't defined yet and the definition can't be moved up
# since '_BaseDataLoaderIter' references 'DataLoader'.
def __iter__(self) -> '_BaseDataLoaderIter':
# When using a single worker, the returned iterator should be
# created every time to avoid resetting its state.
# However, in the case of a multi-worker iterator,
# the iterator is only created once in the lifetime of the
# DataLoader object so that workers can be reused.
if self.persistent_workers and self.num_workers > 0:
if self._iterator is None:
self._iterator = self._get_iterator()
else:
self._iterator._reset(self)
return self._iterator
else:
return self._get_iterator()
@property
def _auto_collation(self):
return self.batch_sampler is not None
@property
def _index_sampler(self):
# The actual sampler used for generating indices for `_DatasetFetcher`
# (see _utils/fetch.py) to read data at each time. This would be
# `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise.
# We can't change `.sampler` and `.batch_sampler` attributes for BC
# reasons.
if self._auto_collation:
return self.batch_sampler
else:
return self.sampler
def __len__(self) -> int:
if self._dataset_kind == _DatasetKind.Iterable:
# NOTE [ IterableDataset and __len__ ]
#
# For `IterableDataset`, `__len__` could be inaccurate when one naively
# does multi-processing data loading, since the samples will be duplicated.
# However, no real use case should be actually using that behavior, so
# it should count as a user error. We should generally trust user
# code to do the proper thing (e.g., configure each replica differently
# in `__iter__`), and give us the correct `__len__` if they choose to
# implement it (this will still throw if the dataset does not implement
# a `__len__`).
#
# To provide a further warning, we track if `__len__` was called on the
# `DataLoader`, save the returned value in `self._len_called`, and warn
# if the iterator ends up yielding more than this number of samples.
# Cannot statically verify that dataset is Sized
length = self._IterableDataset_len_called = len(self.dataset) # type: ignore[assignment, arg-type]
if self.batch_size is not None: # IterableDataset doesn't allow custom sampler or batch_sampler
from math import ceil
if self.drop_last:
length = length // self.batch_size
else:
length = ceil(length / self.batch_size)
return length
else:
return len(self._index_sampler)
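# Editor's worked example of the IterableDataset estimate above, assuming a
# dataset whose reported len() is 10 and batch_size=4:
#   drop_last=True  -> 10 // 4      == 2 batches
#   drop_last=False -> ceil(10 / 4) == 3 batches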
def check_worker_number_rationality(self):
# This function checks whether the dataloader's worker number is reasonable given the
# current system's resources. The current rule is that if the number of workers this
# DataLoader will create is bigger than the number of logical cpus it is allowed to
# use, then we pop up a warning asking the user to pay attention.
#
# e.g. Suppose the current system has 2 physical CPUs with 16 cores each, and each core
# supports 2 threads; the total number of logical cpus is then 2 * 16 * 2 = 64. Say the
# current DataLoader process can use half of them, i.e. 32; then the reasonable max
# number of workers initiated from this process is 32.
# Now, if the created DataLoader has num_workers = 40, which is bigger than 32,
# the warning message is triggered to notify the user to lower the worker number if
# necessary.
#
#
# [Note] Please note that this function respects `cpuset` only when os.sched_getaffinity is
# available (which is the case on most Linux systems, but not on OSX and Windows).
# When os.sched_getaffinity is not available, os.cpu_count() is called instead, but
# it doesn't respect cpuset.
# We don't take threading into account since each worker process is single threaded
# at this time.
#
# We don't set any threading flags (e.g. OMP_NUM_THREADS, MKL_NUM_THREADS, etc.)
# other than calling `torch.set_num_threads(1)` in the worker process. If the passed-in
# functions use 3rd party modules that rely on those threading flags to determine
# how many threads to create (e.g. numpy, etc.), then it is the caller's responsibility to
# set those flags correctly.
def _create_warning_msg(num_worker_suggest, num_worker_created, cpuset_checked):
suggested_max_worker_msg = ((
"Our suggested max number of worker in current system is {}{}, which is smaller "
"than what this DataLoader is going to create.").format(
num_worker_suggest,
("" if cpuset_checked else " (`cpuset` is not taken into account)"))
) if num_worker_suggest is not None else (
"DataLoader is not able to compute a suggested max number of worker in current system.")
warn_msg = (
"This DataLoader will create {} worker processes in total. {} "
"Please be aware that excessive worker creation might get DataLoader running slow or even freeze, "
"lower the worker number to avoid potential slowness/freeze if necessary.").format(
num_worker_created,
suggested_max_worker_msg)
return warn_msg
if not self.num_workers or self.num_workers == 0:
return
# try to compute a suggested max number of worker based on system's resource
max_num_worker_suggest = None
cpuset_checked = False
if hasattr(os, 'sched_getaffinity'):
try:
max_num_worker_suggest = len(os.sched_getaffinity(0))
cpuset_checked = True
except Exception:
pass
if max_num_worker_suggest is None:
# os.cpu_count() could return Optional[int]
# get the cpu count first and check for None in order to satisfy the mypy check
cpu_count = os.cpu_count()
if cpu_count is not None:
max_num_worker_suggest = cpu_count
if max_num_worker_suggest is None:
warnings.warn(_create_warning_msg(
max_num_worker_suggest,
self.num_workers,
cpuset_checked))
return
if self.num_workers > max_num_worker_suggest:
warnings.warn(_create_warning_msg(
max_num_worker_suggest,
self.num_workers,
cpuset_checked))
def _get_shared_seed(self):
if isinstance(self.dataset, IterDataPipe):
_shared_seed = torch.empty((), dtype=torch.int64).random_(generator=self.generator).item()
if dist.is_available() and dist.is_initialized():
rank = dist.get_rank()
ws = dist.get_world_size()
store = dist.distributed_c10d._get_default_store()
if rank == 0:
_shared_seed_str = str(_shared_seed)
store.set(_utils.DATAPIPE_SHARED_SEED, _shared_seed_str)
logger.info(f"Shared seed ({_shared_seed_str}) sent to store on rank 0")
# Use 'add' instead of 'get' since for some store implementations 'add'
# doesn't work well with 'get'.
_shared_seed_recv_cnt = store.add(_utils.DATAPIPE_SHARED_SEED_COUNTER, 1)
start = time.time()
while _shared_seed_recv_cnt < ws:
time.sleep(_utils.DATAPIPE_SHARED_SEED_CHECK_INTERVAL)
_shared_seed_recv_cnt = store.add(_utils.DATAPIPE_SHARED_SEED_COUNTER, 0)
if timedelta(seconds=(time.time() - start)) > \
timedelta(seconds=_utils.DATAPIPE_SHARED_SEED_DEFAULT_TIMEOUT):
raise RuntimeError("Timed out receiving the signal from the distribtued store on "
"Rank 0 that all other Ranks have received the shared seed. "
f"(world_size={ws}, received={_shared_seed_recv_cnt}, "
f"timeout={_utils.DATAPIPE_SHARED_SEED_DEFAULT_TIMEOUT})")
# Reset after all distributed processes have received the shared seed
store.set(_utils.DATAPIPE_SHARED_SEED, "")
_shared_seed_recv_cnt = store.add(_utils.DATAPIPE_SHARED_SEED_COUNTER, -ws)
assert _shared_seed_recv_cnt == 0
else:
_shared_seed_str = ""
start = time.time()
while len(_shared_seed_str) == 0:
time.sleep(_utils.DATAPIPE_SHARED_SEED_CHECK_INTERVAL)
_shared_seed_str = store.get(_utils.DATAPIPE_SHARED_SEED)
if timedelta(seconds=(time.time() - start)) > \
timedelta(seconds=_utils.DATAPIPE_SHARED_SEED_DEFAULT_TIMEOUT):
raise RuntimeError("Timed out receiving the shared seed from the distribtued store "
f"on Rank {rank}. (world_size={ws}, "
f"timeout={_utils.DATAPIPE_SHARED_SEED_DEFAULT_TIMEOUT})")
logger.info(f"Shared seed ({_shared_seed_str}) received from store on rank {rank}")
_shared_seed_recv_cnt = store.add(_utils.DATAPIPE_SHARED_SEED_COUNTER, 1)
# Exit only when all ranks have received the seed; otherwise we risk that the current rank
# will reach the same section of the code again while rank zero is still in the previous iteration
while _shared_seed_recv_cnt > 0:
time.sleep(_utils.DATAPIPE_SHARED_SEED_CHECK_INTERVAL)
_shared_seed_recv_cnt = store.add(_utils.DATAPIPE_SHARED_SEED_COUNTER, 0)
_shared_seed = int(_shared_seed_str)
return _shared_seed
else:
return None
class _BaseDataLoaderIter(object):
def __init__(self, loader: DataLoader) -> None:
self._dataset = loader.dataset
self._shared_seed = loader._get_shared_seed()
if isinstance(self._dataset, IterDataPipe):
shared_rng = torch.Generator()
shared_rng.manual_seed(self._shared_seed)
self._dataset = torch.utils.data.graph_settings.apply_shuffle_seed(self._dataset, shared_rng)
self._dataset_kind = loader._dataset_kind
self._IterableDataset_len_called = loader._IterableDataset_len_called
self._auto_collation = loader._auto_collation
self._drop_last = loader.drop_last
self._index_sampler = loader._index_sampler
self._num_workers = loader.num_workers
self._prefetch_factor = loader.prefetch_factor
# For other backends, pin_memory_device needs to be set. If it is not set,
# the default behaviour is the CUDA device. If pin_memory_device is selected
# and pin_memory is not set, the default behaviour is False.
if (len(loader.pin_memory_device) == 0):
self._pin_memory = loader.pin_memory and torch.cuda.is_available()
self._pin_memory_device = None
else:
if not loader.pin_memory:
warn_msg = ("pin memory device is set and pin_memory flag is not used then device pinned memory won't be used"
"please set pin_memory to true, if you need to use the device pin memory")
warnings.warn(warn_msg)
self._pin_memory = loader.pin_memory
self._pin_memory_device = loader.pin_memory_device
self._timeout = loader.timeout
self._collate_fn = loader.collate_fn
self._sampler_iter = iter(self._index_sampler)
self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item()
self._persistent_workers = loader.persistent_workers
self._num_yielded = 0
self._profile_name = "enumerate(DataLoader)#{}.__next__".format(self.__class__.__name__)
def __iter__(self) -> '_BaseDataLoaderIter':
return self
def _reset(self, loader, first_iter=False):
self._sampler_iter = iter(self._index_sampler)
self._num_yielded = 0
self._IterableDataset_len_called = loader._IterableDataset_len_called
self._shared_seed = loader._get_shared_seed()
if isinstance(self._dataset, IterDataPipe):
shared_rng = torch.Generator()
shared_rng.manual_seed(self._shared_seed)
self._dataset = torch.utils.data.graph_settings.apply_shuffle_seed(self._dataset, shared_rng)
def _next_index(self):
return next(self._sampler_iter) # may raise StopIteration
def _next_data(self):
raise NotImplementedError
def __next__(self) -> Any:
with torch.autograd.profiler.record_function(self._profile_name):
if self._sampler_iter is None:
# TODO(https://github.com/pytorch/pytorch/issues/76750)
self._reset() # type: ignore[call-arg]
data = self._next_data()
self._num_yielded += 1
if self._dataset_kind == _DatasetKind.Iterable and \
self._IterableDataset_len_called is not None and \
self._num_yielded > self._IterableDataset_len_called:
warn_msg = ("Length of IterableDataset {} was reported to be {} (when accessing len(dataloader)), but {} "
"samples have been fetched. ").format(self._dataset, self._IterableDataset_len_called,
self._num_yielded)
if self._num_workers > 0:
warn_msg += ("For multiprocessing data-loading, this could be caused by not properly configuring the "
"IterableDataset replica at each worker. Please see "
"https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset for examples.")
warnings.warn(warn_msg)
return data
def __len__(self) -> int:
return len(self._index_sampler)
def __getstate__(self):
# TODO: add limited pickling support for sharing an iterator
# across multiple threads for HOGWILD.
# Probably the best way to do this is by moving the sample pushing
# to a separate thread and then just sharing the data queue
# but signalling the end is tricky without a non-blocking API
raise NotImplementedError("{} cannot be pickled", self.__class__.__name__)
class _SingleProcessDataLoaderIter(_BaseDataLoaderIter):
def __init__(self, loader):
super(_SingleProcessDataLoaderIter, self).__init__(loader)
assert self._timeout == 0
assert self._num_workers == 0
self._dataset_fetcher = _DatasetKind.create_fetcher(
self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last)
def _next_data(self):
index = self._next_index() # may raise StopIteration
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
if self._pin_memory:
data = _utils.pin_memory.pin_memory(data, self._pin_memory_device)
return data
class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter):
r"""Iterates once over the DataLoader's dataset, as specified by the sampler"""
# NOTE [ Data Loader Multiprocessing Shutdown Logic ]
#
# Preliminary:
#
# Our data model looks like this (queues are indicated with curly brackets):
#
# main process ||
# | ||
# {index_queue} ||
# | ||
# worker processes || DATA
# | ||
# {worker_result_queue} || FLOW
# | ||
# pin_memory_thread of main process || DIRECTION
# | ||
# {data_queue} ||
# | ||
# data output \/
#
# P.S. `worker_result_queue` and `pin_memory_thread` part may be omitted if
# `pin_memory=False`.
#
#
# Terminating multiprocessing logic requires very careful design. In
# particular, we need to make sure that
#
# 1. The iterator gracefully exits the workers when its last reference is
# gone or it is depleted.
#
# In this case, the workers should be gracefully exited because the
# main process may still need to continue to run, and we want cleaning
# up code in the workers to be executed (e.g., releasing GPU memory).
# Naturally, we implement the shutdown logic in `__del__` of
# DataLoaderIterator.
#
# We delay the discussion on the logic in this case until later.
#
# 2. The iterator exits the workers when the loader process and/or worker
# processes exits normally or with error.
#
# We set all workers and `pin_memory_thread` to have `daemon=True`.
#
# You may ask, why can't we make the workers non-daemonic, and
# gracefully exit using the same logic as we have in `__del__` when the
# iterator gets deleted (see 1 above)?
#
# First of all, `__del__` is **not** guaranteed to be called when
# interpreter exits. Even if it is called, by the time it executes,
# many Python core library resources may already be freed, and even
# simple things like acquiring an internal lock of a queue may hang.
# Therefore, in this case, we actually need to prevent `__del__` from
# being executed, and rely on the automatic termination of daemonic
# children.
#
# Thus, we register an `atexit` hook that sets a global flag
# `_utils.python_exit_status`. Since `atexit` hooks are executed in the
# reverse order of registration, we are guaranteed that this flag is
# set before library resources we use are freed (which, at least in
# CPython, is done via an `atexit` handler defined in
# `multiprocessing/util.py`
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/util.py#L320-L362
# registered when an object requiring this mechanism is first
# created, e.g., `mp.Queue`
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/context.py#L100-L103
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/queues.py#L29
# )
#
# So in `__del__`, we check if `_utils.python_exit_status` is set or
# `None` (freed), and perform no-op if so.
#
# However, simply letting library clean-up codes run can also be bad,
# because such codes (i.e., `multiprocessing.util._exit_function()`)
# include join putting threads for `mp.Queue`, which can be blocking.
# Hence, the main process putting threads are called with
# `cancel_join_thread` at creation. See later section
# [ 3b. A process won't hang when putting into a queue; ]
# for more details.
#
# Here are two example cases where library clean-up codes can run
# before `__del__` is called:
#
# 1. If we hold onto a reference to the iterator, it more often
# than not tries to do `multiprocessing` library cleaning before
# clearing the alive referenced objects (https://github.com/pytorch/pytorch/issues/48666)
# and thus prevents our cleaning-up code from running first.
#
# 2. A similar issue arises when a `DataLoader` is used in a subprocess.
# When a process ends, it shuts all its daemonic children
# down with a SIGTERM (instead of joining them without a timeout).
# Similarly for threads, but by a different mechanism. This fact,
# together with a few implementation details of multiprocessing, forces
# us to make workers daemonic. All of our problems arise when a
# DataLoader is used in a subprocess, and are caused by multiprocessing
# code which looks more or less like this:
#
# try:
# your_function_using_a_dataloader()
# finally:
# multiprocessing.util._exit_function()
#
# The joining/termination mentioned above happens inside
# `_exit_function()`. Now, if `your_function_using_a_dataloader()`
# throws, the stack trace stored in the exception will prevent the
# frame which uses `DataLoaderIter` from being freed. If the frame has any
# reference to the `DataLoaderIter` (e.g., in a method of the iter),
# its `__del__`, which starts the shutdown procedure, will not be
# called. That, in turn, means that workers aren't notified. Attempting
# to join in `_exit_function` will then result in a hang.
#
# For context, `_exit_function` is also registered as an `atexit` call.
# So it is unclear to me (@ssnl) why this is needed in a finally block.
# The code dates back to 2008 and there is no comment on the original
# PEP 371 or patch https://bugs.python.org/issue3050 (containing both
# the finally block and the `atexit` registration) that explains this.
#
#
# Finally, another choice is to just shutdown workers with logic in 1
# above whenever we see an error in `next`. This isn't ideal because
# a. It prevents users from using try-catch to resume data loading.
# b. It doesn't prevent hanging if users have references to the
# iterator.
#
# 3. All processes exit if any of them die unexpectedly by fatal signals.
#
# As shown above, the workers are set as daemonic children of the main
# process. However, automatic cleaning-up of such child processes only
# happens if the parent process exits gracefully (e.g., not via fatal
# signals like SIGKILL). So we must ensure that each process will exit
# even if the process that should send/receive data to/from it was
# killed, i.e.,
#
# a. A process won't hang when getting from a queue.
#
# Even with carefully designed data dependencies (i.e., a `put()`
# always corresponding to a `get()`), hanging on `get()` can still
# happen when data in queue is corrupted (e.g., due to
# `cancel_join_thread` or unexpected exit).
#
# For child exit, we set a timeout whenever we try to get data
# from `data_queue`, and check the workers' status on each timeout
# and error.
#       See `_MultiProcessingDataLoaderIter._get_data()` and
#       `_MultiProcessingDataLoaderIter._try_get_data()` for details.
#
#       Additionally, for child exit on non-Windows platforms, we also
#       register a SIGCHLD handler (SIGCHLD is not available on Windows) on
# the main process, which checks if any of the workers fail in the
# (Python) handler. This is more efficient and faster in detecting
# worker failures, compared to only using the above mechanism.
# See `DataLoader.cpp` and `_utils/signal_handling.py` for details.
#
# For `.get()` calls where the sender(s) is not the workers, we
# guard them with timeouts, and check the status of the sender
# when timeout happens:
# + in the workers, the `_utils.worker.ManagerWatchdog` class
# checks the status of the main process.
# + if `pin_memory=True`, when getting from `pin_memory_thread`,
# check `pin_memory_thread` status periodically until `.get()`
# returns or see that `pin_memory_thread` died.
#
# b. A process won't hang when putting into a queue;
#
#           We use `mp.Queue`, which has a separate background thread that
#           puts objects from an unbounded internal buffer onto the underlying
#           pipe. The background thread is daemonic and usually automatically
#           joined when the process *exits*.
#
# In case that the receiver has ended abruptly while
# reading from the pipe, the join will hang forever. The usual
# solution for this in Python is calling `q.cancel_join_thread`,
# which prevents automatically joining it when finalizing
# (exiting).
#
# Nonetheless, `cancel_join_thread` must only be called when the
#           queue is **not** going to be read from or written to by another
# process, because it may hold onto a lock or leave corrupted data
# in the queue, leading other readers/writers to hang.
#
# Hence,
# + For worker processes, we only do so (for their output
# queues, i.e., `worker_result_queue`) before exiting.
# + For `pin_memory_thread`, its output queue `data_queue` is a
# `queue.Queue` that does blocking `put` if the queue is full.
# So there is no above problem, but as a result, in
# `_pin_memory_loop`, we do need to wrap the `put` in a loop
# that breaks not only upon success, but also when the main
# process stops reading, i.e., is shutting down.
#             + For the loader process, we `cancel_join_thread()` all
#               `_index_queues` because the whole purpose of workers and
#               `pin_memory_thread` is to serve the loader process. If the
#               loader process is already exiting, we don't really care if
#               the queues are corrupted.
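#
#           A minimal, standalone sketch (not loader code; the names are made
#           up) of the trade-off described above: with `cancel_join_thread`,
#           the producer can always exit, at the cost of possibly losing the
#           unflushed data, which is acceptable only when nobody will read
#           from the queue afterwards.
#
#               import multiprocessing as mp
#
#               def producer(q):
#                   q.cancel_join_thread()     # never block on the feeder thread
#                   q.put(b'x' * (1 << 20))    # may never be flushed to the pipe
#                   # without cancel_join_thread, exiting here could hang,
#                   # because the parent below never reads from `q`
#
#               if __name__ == '__main__':
#                   q = mp.Queue()
#                   p = mp.Process(target=producer, args=(q,))
#                   p.start()
#                   p.join()                   # returns: the producer always exits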
#
#
# Now let's get back to 1:
# how we gracefully exit the workers when the last reference to the
# iterator is gone.
#
# To achieve this, we implement the following logic along with the design
# choices mentioned above:
#
# `workers_done_event`:
# A `multiprocessing.Event` shared among the main process and all worker
# processes. This is used to signal the workers that the iterator is
# shutting down. After it is set, they will not send processed data to
# queues anymore, and only wait for the final `None` before exiting.
# `done_event` isn't strictly needed. I.e., we can just check for `None`
# from the input queue, but it allows us to skip wasting resources
# processing data if we are already shutting down.
#
# `pin_memory_thread_done_event`:
# A `threading.Event` for a similar purpose to that of
# `workers_done_event`, but is for the `pin_memory_thread`. The reason
# that separate events are needed is that `pin_memory_thread` reads from
#     the output queue of the workers. But the workers, upon seeing that
#     `workers_done_event` is set, only want to see the final `None`, and are
#     not required to flush all data in the output queue (e.g., they may call
#     `cancel_join_thread` on that queue if their `IterableDataset` iterator
#     happens to exhaust coincidentally, which is out of the control of the
#     main process). Thus, since we will exit `pin_memory_thread` before the
#     workers (see below), two separate events are used.
#
# NOTE: In short, the protocol is that the main process will set these
#       `done_event`s and then send the corresponding processes/threads a `None`,
# and that they may exit at any time after receiving the `None`.
#
# NOTE: Using `None` as the final signal is valid, since normal data will
# always be a 2-tuple with the 1st element being the index of the data
# transferred (different from dataset index/key), and the 2nd being
# either the dataset key or the data sample (depending on which part
# of the data model the queue is at).
#
# [ worker processes ]
# While loader process is alive:
# Get from `index_queue`.
#       If something was received (i.e., did not time out),
#          Check `workers_done_event`.
#            If set, continue to next iteration
#                    i.e., keep getting until we see the `None`, then exit.
# Otherwise, process data:
# If is fetching from an `IterableDataset` and the iterator
# is exhausted, send an `_IterableDatasetStopIteration`
# object to signal iteration end. The main process, upon
# receiving such an object, will send `None` to this
# worker and not use the corresponding `index_queue`
# anymore.
# If timed out,
#          Whether or not `workers_done_event` is set (we still need to see
#          the `None`), must continue to the next iteration.
# (outside loop)
# If `workers_done_event` is set, (this can be False with `IterableDataset`)
# `data_queue.cancel_join_thread()`. (Everything is ending here:
# main process won't read from it;
# other workers will also call
# `cancel_join_thread`.)
#
# [ pin_memory_thread ]
# # No need to check main thread. If this thread is alive, the main loader
# # thread must be alive, because this thread is set as daemonic.
# While `pin_memory_thread_done_event` is not set:
#     Get from `worker_result_queue`.
# If timed out, continue to get in the next iteration.
# Otherwise, process data.
# While `pin_memory_thread_done_event` is not set:
# Put processed data to `data_queue` (a `queue.Queue` with blocking put)
# If timed out, continue to put in the next iteration.
#            Otherwise, break, i.e., continuing to the outer loop.
#
# NOTE: we don't check the status of the main thread because
# 1. if the process is killed by fatal signal, `pin_memory_thread`
# ends.
# 2. in other cases, either the cleaning-up in __del__ or the
# automatic exit of daemonic thread will take care of it.
# This won't busy-wait either because `.get(timeout)` does not
# busy-wait.
#
# [ main process ]
# In the DataLoader Iter's `__del__`
# b. Exit `pin_memory_thread`
# i. Set `pin_memory_thread_done_event`.
#          ii.  Put `None` in `worker_result_queue`.
# iii. Join the `pin_memory_thread`.
# iv. `worker_result_queue.cancel_join_thread()`.
#
# c. Exit the workers.
# i. Set `workers_done_event`.
# ii. Put `None` in each worker's `index_queue`.
# iii. Join the workers.
# iv. Call `.cancel_join_thread()` on each worker's `index_queue`.
#
# NOTE: (c) is better placed after (b) because it may leave corrupted
#       data in `worker_result_queue`, which `pin_memory_thread`
#       reads from, in which case the `pin_memory_thread` can only
#       exit after timing out, which is slow. Nonetheless, the same thing
#       happens if a worker is killed by a signal at an unfortunate time,
#       but in all other cases, we are better off having a non-corrupted
#       `worker_result_queue` for `pin_memory_thread`.
#
# NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b)
#       can be omitted.
#
# NB: `done_event`s aren't strictly needed. E.g., we can just check for
#     `None` from `index_queue`, but they allow us to skip wasting resources
#     processing indices already in `index_queue` if we are already shutting
#     down.
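# A minimal, self-contained sketch (hypothetical names, not loader code) of
# the "done event + final `None`" protocol described above. It only
# illustrates the ordering used by the main process: set the event first,
# then send the sentinel, then join.
#
#     import multiprocessing as mp
#
#     def worker(in_queue, done_event):
#         while True:
#             item = in_queue.get()
#             if item is None:            # final signal: may exit any time now
#                 break
#             if done_event.is_set():     # shutting down: skip the work
#                 continue
#             ...                         # process `item` here
#
#     if __name__ == '__main__':
#         ctx = mp.get_context()
#         in_queue, done_event = ctx.Queue(), ctx.Event()
#         w = ctx.Process(target=worker, args=(in_queue, done_event))
#         w.start()
#         done_event.set()                # 1. signal shutdown
#         in_queue.put(None)              # 2. wake the worker with the sentinel
#         w.join()                        # 3. join cannot hang on a busy worker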
def __init__(self, loader):
super(_MultiProcessingDataLoaderIter, self).__init__(loader)
assert self._num_workers > 0
assert self._prefetch_factor > 0
if loader.multiprocessing_context is None:
multiprocessing_context = multiprocessing
else:
multiprocessing_context = loader.multiprocessing_context
self._worker_init_fn = loader.worker_init_fn
# No certainty which module multiprocessing_context is
self._worker_result_queue = multiprocessing_context.Queue() # type: ignore[var-annotated]
self._worker_pids_set = False
self._shutdown = False
self._workers_done_event = multiprocessing_context.Event()
self._index_queues = []
self._workers = []
for i in range(self._num_workers):
# No certainty which module multiprocessing_context is
index_queue = multiprocessing_context.Queue() # type: ignore[var-annotated]
# Need to `cancel_join_thread` here!
# See sections (2) and (3b) above.
index_queue.cancel_join_thread()
w = multiprocessing_context.Process(
target=_utils.worker._worker_loop,
args=(self._dataset_kind, self._dataset, index_queue,
self._worker_result_queue, self._workers_done_event,
self._auto_collation, self._collate_fn, self._drop_last,
self._base_seed, self._worker_init_fn, i, self._num_workers,
self._persistent_workers, self._shared_seed))
w.daemon = True
# NB: Process.start() actually takes some time as it needs to
#     start a process and pass the arguments over via a pipe.
#     Therefore, we only add a worker to the self._workers list after
#     it has started, so that we do not call .join() if the program dies
#     before it starts, and __del__ tries to join but will get:
#         AssertionError: can only join a started process.
w.start()
self._index_queues.append(index_queue)
self._workers.append(w)
if self._pin_memory:
self._pin_memory_thread_done_event = threading.Event()
# Queue is not type-annotated
self._data_queue = queue.Queue() # type: ignore[var-annotated]
pin_memory_thread = threading.Thread(
target=_utils.pin_memory._pin_memory_loop,
args=(self._worker_result_queue, self._data_queue,
torch.cuda.current_device(),
self._pin_memory_thread_done_event, self._pin_memory_device))
pin_memory_thread.daemon = True
pin_memory_thread.start()
# Similar to workers (see comment above), we only register
# pin_memory_thread once it is started.
self._pin_memory_thread = pin_memory_thread
else:
self._data_queue = self._worker_result_queue
# In some rare cases, persistent workers (daemonic processes)
# would be terminated before `__del__` of the iterator is invoked
# when the main process exits.
# That would cause a failure when `pin_memory_thread` tries to read
# corrupted data from `worker_result_queue`.
# `atexit` is used to shut down the thread and child processes in the
# right sequence before the main process exits.
if self._persistent_workers and self._pin_memory:
import atexit
for w in self._workers:
atexit.register(_MultiProcessingDataLoaderIter._clean_up_worker, w)
# .pid can be None only before process is spawned (not the case, so ignore)
_utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers)) # type: ignore[misc]
_utils.signal_handling._set_SIGCHLD_handler()
self._worker_pids_set = True
self._reset(loader, first_iter=True)
def _reset(self, loader, first_iter=False):
super()._reset(loader, first_iter)
self._send_idx = 0 # idx of the next task to be sent to workers
self._rcvd_idx = 0 # idx of the next task to be returned in __next__
# information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx).
# map: task idx => - (worker_id,) if data isn't fetched (outstanding)
# \ (worker_id, data) if data is already fetched (out-of-order)
self._task_info = {}
self._tasks_outstanding = 0 # always equal to count(v for v in task_info.values() if len(v) == 1)
# A list of booleans representing whether each worker still has work to
# do, i.e., not having exhausted its iterable dataset object. It always
# contains all `True`s if not using an iterable-style dataset
# (i.e., if kind != Iterable).
# Note that this indicates that a worker still has work to do *for this epoch*.
# It does not mean that a worker is dead. In case of `_persistent_workers`,
# the worker will be reset to available in the next epoch.
self._workers_status = [True for i in range(self._num_workers)]
# Reset the worker queue cycle so it resumes next epoch at worker 0
self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers))
# We resume the prefetching in case it was enabled
if not first_iter:
for idx in range(self._num_workers):
self._index_queues[idx].put(_utils.worker._ResumeIteration(self._shared_seed))
resume_iteration_cnt = self._num_workers
while resume_iteration_cnt > 0:
return_idx, return_data = self._get_data()
if isinstance(return_idx, _utils.worker._ResumeIteration):
assert return_data is None
resume_iteration_cnt -= 1
# prime the prefetch loop
for _ in range(self._prefetch_factor * self._num_workers):
self._try_put_index()
def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):
# Tries to fetch data from `self._data_queue` once for a given timeout.
# This can also be used as inner loop of fetching without timeout, with
# the sender status as the loop condition.
#
# This raises a `RuntimeError` if any worker died unexpectedly. This error
# can come from either the SIGCHLD handler in `_utils/signal_handling.py`
# (only for non-Windows platforms), or the manual check below on errors
# and timeouts.
#
# Returns a 2-tuple:
#   (bool: whether we successfully got data, any: data if successful else None)
try:
data = self._data_queue.get(timeout=timeout)
return (True, data)
except Exception as e:
# At timeout and error, we manually check whether any worker has
# failed. Note that this is the only mechanism for Windows to detect
# worker failures.
failed_workers = []
for worker_id, w in enumerate(self._workers):
if self._workers_status[worker_id] and not w.is_alive():
failed_workers.append(w)
self._mark_worker_as_unavailable(worker_id)
if len(failed_workers) > 0:
pids_str = ', '.join(str(w.pid) for w in failed_workers)
raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str)) from e
if isinstance(e, queue.Empty):
return (False, None)
import tempfile
import errno
try:
# Raise an exception if we are this close to the FDs limit.
# Apparently, trying to open only one file is not a sufficient
# test.
# See NOTE [ DataLoader on Linux and open files limit ]
fds_limit_margin = 10
fs = [tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)]
except OSError as e:
if e.errno == errno.EMFILE:
raise RuntimeError(
"Too many open files. Communication with the"
" workers is no longer possible. Please increase the"
" limit using `ulimit -n` in the shell or change the"
" sharing strategy by calling"
" `torch.multiprocessing.set_sharing_strategy('file_system')`"
" at the beginning of your code") from None
raise
# NOTE [ DataLoader on Linux and open files limit ]
#
# On Linux when DataLoader is used with multiprocessing we pass the data between
# the root process and the workers through SHM files. We remove those files from
# the filesystem as soon as they are created and keep them alive by
# passing around their file descriptors through AF_UNIX sockets. (See
# docs/source/multiprocessing.rst and `Multiprocessing Technical Notes` in
# the wiki (https://github.com/pytorch/pytorch/wiki).)
#
# This sometimes leads us to exceeding the open files limit. When that happens,
# and the offending file descriptor is coming over a socket, the `socket` Python
# package silently strips the file descriptor from the message, setting only the
# `MSG_CTRUNC` flag (which might be a bit misleading since the manpage says that
# it _indicates that some control data were discarded due to lack of space in
# the buffer for ancillary data_). This might reflect the C implementation of
# AF_UNIX sockets.
#
# This behaviour can be reproduced with the script and instructions at the
# bottom of this note.
#
# When that happens, the standard Python `multiprocessing` (and not
# `torch.multiprocessing`) raises a `RuntimeError: received 0 items of ancdata`
#
# Sometimes, instead of the FD being stripped, you may get an `OSError:
# Too many open files`, both in the script below and in DataLoader. However,
# this is rare and seems to be nondeterministic.
#
#
# #!/usr/bin/env python3
# import sys
# import socket
# import os
# import array
# import shutil
# import socket
#
#
# if len(sys.argv) != 4:
# print("Usage: ", sys.argv[0], " tmp_dirname iteration (send|recv)")
# sys.exit(1)
#
# if __name__ == '__main__':
# dirname = sys.argv[1]
# sock_path = dirname + "/sock"
# iterations = int(sys.argv[2])
# def dummy_path(i):
# return dirname + "/" + str(i) + ".dummy"
#
#
# if sys.argv[3] == 'send':
# while not os.path.exists(sock_path):
# pass
# client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
# client.connect(sock_path)
# for i in range(iterations):
# fd = os.open(dummy_path(i), os.O_WRONLY | os.O_CREAT)
# ancdata = array.array('i', [fd])
# msg = bytes([i % 256])
# print("Sending fd ", fd, " (iteration #", i, ")")
# client.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, ancdata)])
#
#
# else:
# assert sys.argv[3] == 'recv'
#
# if os.path.exists(dirname):
# raise Exception("Directory exists")
#
# os.mkdir(dirname)
#
# print("Opening socket...")
# server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
# server.bind(sock_path)
#
# print("Listening...")
# for i in range(iterations):
# a = array.array('i')
# msg, ancdata, flags, addr = server.recvmsg(1, socket.CMSG_SPACE(a.itemsize))
# assert(len(ancdata) == 1)
# cmsg_level, cmsg_type, cmsg_data = ancdata[0]
# a.frombytes(cmsg_data)
# print("Received fd ", a[0], " (iteration #", i, ")")
#
# shutil.rmtree(dirname)
#
# Steps to reproduce:
#
# 1. Run two shells and set lower file descriptor limit in the receiving one:
# (shell1) ulimit -n 1020
# (shell2) ulimit -n 1022
#
# 2. Run the script above with the `recv` option in the first shell
# (shell1) ./test_socket.py sock_tmp 1017 recv
#
# 3. Run the script with the `send` option in the second shell:
# (shell2) ./test_socket.py sock_tmp 1017 send
def _get_data(self):
# Fetches data from `self._data_queue`.
#
# We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds,
# which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)`
# in a loop. This is the only mechanism to detect worker failures for
# Windows. For other platforms, a SIGCHLD handler is also used for
# worker failure detection.
#
# If `pin_memory=True`, we also need to check if `pin_memory_thread` has
# died at timeouts.
if self._timeout > 0:
success, data = self._try_get_data(self._timeout)
if success:
return data
else:
raise RuntimeError('DataLoader timed out after {} seconds'.format(self._timeout))
elif self._pin_memory:
while self._pin_memory_thread.is_alive():
success, data = self._try_get_data()
if success:
return data
else:
# while condition is false, i.e., pin_memory_thread died.
raise RuntimeError('Pin memory thread exited unexpectedly')
# In this case, `self._data_queue` is a `queue.Queue`. But we don't
# need to call `.task_done()` because we don't use `.join()`.
else:
while True:
success, data = self._try_get_data()
if success:
return data
def _next_data(self):
while True:
# If the worker responsible for `self._rcvd_idx` has already ended
# and was unable to fulfill this task (due to exhausting an `IterableDataset`),
# we try to advance `self._rcvd_idx` to find the next valid index.
#
# This part needs to run in the loop because both the `self._get_data()`
# call and `_IterableDatasetStopIteration` check below can mark
# extra worker(s) as dead.
while self._rcvd_idx < self._send_idx:
info = self._task_info[self._rcvd_idx]
worker_id = info[0]
if len(info) == 2 or self._workers_status[worker_id]: # has data or is still active
break
del self._task_info[self._rcvd_idx]
self._rcvd_idx += 1
else:
# no valid `self._rcvd_idx` is found (i.e., didn't break)
if not self._persistent_workers:
self._shutdown_workers()
raise StopIteration
# Now `self._rcvd_idx` is the batch index we want to fetch
# Check if the next sample has already been generated
if len(self._task_info[self._rcvd_idx]) == 2:
data = self._task_info.pop(self._rcvd_idx)[1]
return self._process_data(data)
assert not self._shutdown and self._tasks_outstanding > 0
idx, data = self._get_data()
self._tasks_outstanding -= 1
if self._dataset_kind == _DatasetKind.Iterable:
# Check for _IterableDatasetStopIteration
if isinstance(data, _utils.worker._IterableDatasetStopIteration):
if self._persistent_workers:
self._workers_status[data.worker_id] = False
else:
self._mark_worker_as_unavailable(data.worker_id)
self._try_put_index()
continue
if idx != self._rcvd_idx:
# store out-of-order samples
self._task_info[idx] += (data,)
else:
del self._task_info[idx]
return self._process_data(data)
def _try_put_index(self):
assert self._tasks_outstanding < self._prefetch_factor * self._num_workers
try:
index = self._next_index()
except StopIteration:
return
for _ in range(self._num_workers): # find the next active worker, if any
worker_queue_idx = next(self._worker_queue_idx_cycle)
if self._workers_status[worker_queue_idx]:
break
else:
# not found (i.e., didn't break)
return
self._index_queues[worker_queue_idx].put((self._send_idx, index))
self._task_info[self._send_idx] = (worker_queue_idx,)
self._tasks_outstanding += 1
self._send_idx += 1
def _process_data(self, data):
self._rcvd_idx += 1
self._try_put_index()
if isinstance(data, ExceptionWrapper):
data.reraise()
return data
def _mark_worker_as_unavailable(self, worker_id, shutdown=False):
# Mark a worker as having finished its work e.g., due to
# exhausting an `IterableDataset`. This should be used only when this
# `_MultiProcessingDataLoaderIter` is going to continue running.
assert self._workers_status[worker_id] or (self._persistent_workers and shutdown)
# Signal termination to that specific worker.
q = self._index_queues[worker_id]
# Indicate that no more data will be put on this queue by the current
# process.
q.put(None)
# Note that we don't actually join the worker here, nor do we remove the
# worker's pid from C side struct because (1) joining may be slow, and
# (2) since we don't join, the worker may still raise error, and we
# prefer capturing those, rather than ignoring them, even though they
# are raised after the worker has finished its job.
# Joining is deferred to `_shutdown_workers`, which is called when
# all workers finish their jobs (e.g., `IterableDataset` replicas) or
# when this iterator is garbage collected.
self._workers_status[worker_id] = False
assert self._workers_done_event.is_set() == shutdown
def _shutdown_workers(self):
# Called when shutting down this `_MultiProcessingDataLoaderIter`.
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on
# the logic of this function.
python_exit_status = _utils.python_exit_status
if python_exit_status is True or python_exit_status is None:
# See (2) of the note. If Python is shutting down, do no-op.
return
# Normal exit when last reference is gone / iterator is depleted.
# See (1) and the second half of the note.
if not self._shutdown:
self._shutdown = True
try:
# Normal exit when last reference is gone / iterator is depleted.
# See (1) and the second half of the note.
# Exit `pin_memory_thread` first because exiting workers may leave
# corrupted data in `worker_result_queue` which `pin_memory_thread`
# reads from.
if hasattr(self, '_pin_memory_thread'):
# Use hasattr in case error happens before we set the attribute.
self._pin_memory_thread_done_event.set()
# Send something to pin_memory_thread in case it is waiting
# so that it can wake up and check `pin_memory_thread_done_event`
self._worker_result_queue.put((None, None))
self._pin_memory_thread.join()
self._worker_result_queue.cancel_join_thread()
self._worker_result_queue.close()
# Exit workers now.
self._workers_done_event.set()
for worker_id in range(len(self._workers)):
# Get number of workers from `len(self._workers)` instead of
# `self._num_workers` in case we error before starting all
# workers.
# With persistent workers, a worker may merely be paused for the epoch
# while still alive, so we must shut it down explicitly as well.
if self._persistent_workers or self._workers_status[worker_id]:
self._mark_worker_as_unavailable(worker_id, shutdown=True)
for w in self._workers:
# We should be able to join here, but in case anything went
# wrong, we set a timeout and if the workers fail to join,
# they are killed in the `finally` block.
w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
for q in self._index_queues:
q.cancel_join_thread()
q.close()
finally:
# Even though all this function does is putting into queues that
# we have called `cancel_join_thread` on, weird things can
# happen when a worker is killed by a signal, e.g., hanging in
# `Event.set()`. So we need to guard this with SIGCHLD handler,
# and remove pids from the C side data structure only at the
# end.
#
# FIXME: Unfortunately, for Windows, we are missing a worker
# error detection mechanism here in this function, as it
# doesn't provide a SIGCHLD handler.
if self._worker_pids_set:
_utils.signal_handling._remove_worker_pids(id(self))
self._worker_pids_set = False
for w in self._workers:
if w.is_alive():
# Existing mechanisms try to make the workers exit
# peacefully, but in case that we unfortunately reach
# here, which we shouldn't, (e.g., pytorch/pytorch#39570),
# we kill the worker.
w.terminate()
# staticmethod is used to remove reference to `_MultiProcessingDataLoaderIter`
@staticmethod
def _clean_up_worker(w):
try:
w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
finally:
if w.is_alive():
w.terminate()
def __del__(self):
self._shutdown_workers()
|
pytorch-master
|
torch/utils/data/dataloader.py
|
import torch
from torch import Tensor
from typing import Iterator, Iterable, Optional, Sequence, List, TypeVar, Generic, Sized, Union
__all__ = [
"BatchSampler",
"RandomSampler",
"Sampler",
"SequentialSampler",
"SubsetRandomSampler",
"WeightedRandomSampler",
]
T_co = TypeVar('T_co', covariant=True)
class Sampler(Generic[T_co]):
r"""Base class for all Samplers.
Every Sampler subclass has to provide an :meth:`__iter__` method, providing a
way to iterate over indices of dataset elements, and a :meth:`__len__` method
    that returns the length of the returned iterator.
.. note:: The :meth:`__len__` method isn't strictly required by
:class:`~torch.utils.data.DataLoader`, but is expected in any
calculation involving the length of a :class:`~torch.utils.data.DataLoader`.
"""
def __init__(self, data_source: Optional[Sized]) -> None:
pass
def __iter__(self) -> Iterator[T_co]:
raise NotImplementedError
# NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
#
# Many times we have an abstract class representing a collection/iterable of
# data, e.g., `torch.utils.data.Sampler`, with its subclasses optionally
# implementing a `__len__` method. In such cases, we must make sure to not
# provide a default implementation, because both straightforward default
# implementations have their issues:
#
# + `return NotImplemented`:
# Calling `len(subclass_instance)` raises:
# TypeError: 'NotImplementedType' object cannot be interpreted as an integer
#
# + `raise NotImplementedError()`:
# This prevents triggering some fallback behavior. E.g., the built-in
# `list(X)` tries to call `len(X)` first, and executes a different code
# path if the method is not found or `NotImplemented` is returned, while
#   raising a `NotImplementedError` will propagate and make the call
#   fail where it could have used `__iter__` to complete the call.
#
# Thus, the only two sensible things to do are
#
# + **not** provide a default `__len__`.
#
# + raise a `TypeError` instead, which is what Python uses when users call
# a method that is not defined on an object.
# (@ssnl verifies that this works on at least Python 3.7.)
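# A small sketch of the behaviors described above (hypothetical classes, not
# part of torch):
#
#     class ReturnsNotImplemented:
#         def __iter__(self): return iter([1, 2, 3])
#         def __len__(self): return NotImplemented
#
#     class RaisesNotImplementedError:
#         def __iter__(self): return iter([1, 2, 3])
#         def __len__(self): raise NotImplementedError
#
#     class NoLen:
#         def __iter__(self): return iter([1, 2, 3])
#
#     # len(ReturnsNotImplemented())       -> TypeError: 'NotImplementedType'
#     #                                       object cannot be interpreted as
#     #                                       an integer
#     # list(RaisesNotImplementedError())  -> NotImplementedError propagates
#     #                                       instead of falling back to
#     #                                       iteration
#     # list(NoLen())                      -> [1, 2, 3]; len(NoLen()) raises a
#     #                                       plain TypeError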
class SequentialSampler(Sampler[int]):
r"""Samples elements sequentially, always in the same order.
Args:
data_source (Dataset): dataset to sample from
"""
data_source: Sized
def __init__(self, data_source: Sized) -> None:
self.data_source = data_source
def __iter__(self) -> Iterator[int]:
return iter(range(len(self.data_source)))
def __len__(self) -> int:
return len(self.data_source)
class RandomSampler(Sampler[int]):
r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.
If with replacement, then user can specify :attr:`num_samples` to draw.
Args:
data_source (Dataset): dataset to sample from
replacement (bool): samples are drawn on-demand with replacement if ``True``, default=``False``
num_samples (int): number of samples to draw, default=`len(dataset)`.
generator (Generator): Generator used in sampling.
"""
data_source: Sized
replacement: bool
def __init__(self, data_source: Sized, replacement: bool = False,
num_samples: Optional[int] = None, generator=None) -> None:
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
self.generator = generator
if not isinstance(self.replacement, bool):
raise TypeError("replacement should be a boolean value, but got "
"replacement={}".format(self.replacement))
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(self.num_samples))
@property
def num_samples(self) -> int:
# dataset size might change at runtime
if self._num_samples is None:
return len(self.data_source)
return self._num_samples
def __iter__(self) -> Iterator[int]:
n = len(self.data_source)
if self.generator is None:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
generator = torch.Generator()
generator.manual_seed(seed)
else:
generator = self.generator
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=generator).tolist()
yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=generator).tolist()
else:
for _ in range(self.num_samples // n):
yield from torch.randperm(n, generator=generator).tolist()
yield from torch.randperm(n, generator=generator).tolist()[:self.num_samples % n]
def __len__(self) -> int:
return self.num_samples
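# A brief usage sketch (illustrative only, not part of the class): passing an
# explicitly seeded generator makes the sampling order reproducible; re-seed
# it before each pass to obtain the same permutation again.
#
#     g = torch.Generator()
#     g.manual_seed(0)
#     sampler = RandomSampler(range(10), generator=g)
#     order = list(sampler)  # a fixed permutation of 0..9 for this seed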
class SubsetRandomSampler(Sampler[int]):
r"""Samples elements randomly from a given list of indices, without replacement.
Args:
indices (sequence): a sequence of indices
generator (Generator): Generator used in sampling.
"""
indices: Sequence[int]
def __init__(self, indices: Sequence[int], generator=None) -> None:
self.indices = indices
self.generator = generator
def __iter__(self) -> Iterator[int]:
for i in torch.randperm(len(self.indices), generator=self.generator):
yield self.indices[i]
def __len__(self) -> int:
return len(self.indices)
class WeightedRandomSampler(Sampler[int]):
r"""Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights).
Args:
        weights (sequence)   : a sequence of weights, not necessarily summing up to one
num_samples (int): number of samples to draw
replacement (bool): if ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
generator (Generator): Generator used in sampling.
Example:
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True))
[4, 4, 1, 4, 5]
>>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False))
[0, 1, 4, 3, 2]
"""
weights: Tensor
num_samples: int
replacement: bool
def __init__(self, weights: Sequence[float], num_samples: int,
replacement: bool = True, generator=None) -> None:
if not isinstance(num_samples, int) or isinstance(num_samples, bool) or \
num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(num_samples))
if not isinstance(replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(replacement))
weights_tensor = torch.as_tensor(weights, dtype=torch.double)
if len(weights_tensor.shape) != 1:
raise ValueError("weights should be a 1d sequence but given "
"weights have shape {}".format(tuple(weights_tensor.shape)))
self.weights = weights_tensor
self.num_samples = num_samples
self.replacement = replacement
self.generator = generator
def __iter__(self) -> Iterator[int]:
rand_tensor = torch.multinomial(self.weights, self.num_samples, self.replacement, generator=self.generator)
yield from iter(rand_tensor.tolist())
def __len__(self) -> int:
return self.num_samples
class BatchSampler(Sampler[List[int]]):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler or Iterable): Base sampler. Can be any iterable object
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler: Union[Sampler[int], Iterable[int]], batch_size: int, drop_last: bool) -> None:
# Since collections.abc.Iterable does not check for `__getitem__`, which
# is one way for an object to be an iterable, we don't do an `isinstance`
# check here.
if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integer value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self) -> Iterator[List[int]]:
# Implemented based on the benchmarking in https://github.com/pytorch/pytorch/pull/76951
if self.drop_last:
sampler_iter = iter(self.sampler)
while True:
try:
batch = [next(sampler_iter) for _ in range(self.batch_size)]
yield batch
except StopIteration:
break
else:
batch = [0] * self.batch_size
idx_in_batch = 0
for idx in self.sampler:
batch[idx_in_batch] = idx
idx_in_batch += 1
if idx_in_batch == self.batch_size:
yield batch
idx_in_batch = 0
batch = [0] * self.batch_size
if idx_in_batch > 0:
yield batch[:idx_in_batch]
def __len__(self) -> int:
# Can only be called if self.sampler has __len__ implemented
# We cannot enforce this condition, so we turn off typechecking for the
# implementation below.
# Somewhat related: see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
if self.drop_last:
return len(self.sampler) // self.batch_size # type: ignore[arg-type]
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size # type: ignore[arg-type]
|
pytorch-master
|
torch/utils/data/sampler.py
|
r""""Contains definitions of the methods used by the _BaseDataLoaderIter to fetch
data from an iterable-style or map-style dataset. This logic is shared in both
single- and multi-processing data loading.
"""
class _BaseDatasetFetcher(object):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
self.dataset = dataset
self.auto_collation = auto_collation
self.collate_fn = collate_fn
self.drop_last = drop_last
def fetch(self, possibly_batched_index):
raise NotImplementedError()
class _IterableDatasetFetcher(_BaseDatasetFetcher):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
super(_IterableDatasetFetcher, self).__init__(dataset, auto_collation, collate_fn, drop_last)
self.dataset_iter = iter(dataset)
self.ended = False
def fetch(self, possibly_batched_index):
if self.ended:
raise StopIteration
if self.auto_collation:
data = []
for _ in possibly_batched_index:
try:
data.append(next(self.dataset_iter))
except StopIteration:
self.ended = True
break
if len(data) == 0 or (self.drop_last and len(data) < len(possibly_batched_index)):
raise StopIteration
else:
data = next(self.dataset_iter)
return self.collate_fn(data)
class _MapDatasetFetcher(_BaseDatasetFetcher):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
super(_MapDatasetFetcher, self).__init__(dataset, auto_collation, collate_fn, drop_last)
def fetch(self, possibly_batched_index):
if self.auto_collation:
data = [self.dataset[idx] for idx in possibly_batched_index]
else:
data = self.dataset[possibly_batched_index]
return self.collate_fn(data)
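# A minimal illustration (not used by the library itself) of the fetcher
# contract above, assuming a trivial list dataset and `list` as the collate
# function:
#
#     fetcher = _MapDatasetFetcher(dataset=list(range(10)),
#                                  auto_collation=True,
#                                  collate_fn=list,
#                                  drop_last=False)
#     fetcher.fetch([0, 2, 4])  # -> [0, 2, 4]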
|
pytorch-master
|
torch/utils/data/_utils/fetch.py
|
r""""Contains definitions of the methods used by the _BaseDataLoaderIter workers.
These **needs** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import torch
import random
import os
import queue
from dataclasses import dataclass
from torch._utils import ExceptionWrapper
from typing import Optional, Union
from . import signal_handling, MP_STATUS_CHECK_INTERVAL, IS_WINDOWS, HAS_NUMPY
if IS_WINDOWS:
import ctypes
from ctypes.wintypes import DWORD, BOOL, HANDLE
# On Windows, the parent ID of the worker process remains unchanged when the manager process
# is gone, and the only way to check it through the OS is to let the worker have a process handle
# of the manager and ask if the process status has changed.
class ManagerWatchdog(object):
def __init__(self):
self.manager_pid = os.getppid()
# mypy cannot detect this code is windows only
self.kernel32 = ctypes.WinDLL('kernel32', use_last_error=True) # type: ignore[attr-defined]
self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)
self.kernel32.OpenProcess.restype = HANDLE
self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD)
self.kernel32.WaitForSingleObject.restype = DWORD
# Value obtained from https://msdn.microsoft.com/en-us/library/ms684880.aspx
SYNCHRONIZE = 0x00100000
self.manager_handle = self.kernel32.OpenProcess(SYNCHRONIZE, 0, self.manager_pid)
if not self.manager_handle:
raise ctypes.WinError(ctypes.get_last_error()) # type: ignore[attr-defined]
self.manager_dead = False
def is_alive(self):
if not self.manager_dead:
# Value obtained from https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx
self.manager_dead = self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0
return not self.manager_dead
else:
class ManagerWatchdog(object): # type: ignore[no-redef]
def __init__(self):
self.manager_pid = os.getppid()
self.manager_dead = False
def is_alive(self):
if not self.manager_dead:
self.manager_dead = os.getppid() != self.manager_pid
return not self.manager_dead
_worker_info = None
class WorkerInfo(object):
__initialized = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.__keys = tuple(kwargs.keys())
self.__initialized = True
def __setattr__(self, key, val):
if self.__initialized:
raise RuntimeError("Cannot assign attributes to {} objects".format(self.__class__.__name__))
return super(WorkerInfo, self).__setattr__(key, val)
def __repr__(self):
items = []
for k in self.__keys:
items.append('{}={}'.format(k, getattr(self, k)))
return '{}({})'.format(self.__class__.__name__, ', '.join(items))
def get_worker_info():
r"""Returns the information about the current
:class:`~torch.utils.data.DataLoader` iterator worker process.
When called in a worker, this returns an object guaranteed to have the
following attributes:
* :attr:`id`: the current worker id.
* :attr:`num_workers`: the total number of workers.
* :attr:`seed`: the random seed set for the current worker. This value is
determined by main process RNG and the worker id. See
:class:`~torch.utils.data.DataLoader`'s documentation for more details.
* :attr:`dataset`: the copy of the dataset object in **this** process. Note
that this will be a different object in a different process than the one
in the main process.
When called in the main process, this returns ``None``.
.. note::
When used in a :attr:`worker_init_fn` passed over to
:class:`~torch.utils.data.DataLoader`, this method can be useful to
set up each worker process differently, for instance, using ``worker_id``
to configure the ``dataset`` object to only read a specific fraction of a
sharded dataset, or use ``seed`` to seed other libraries used in dataset
code.
"""
return _worker_info
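# A hedged usage sketch of the pattern mentioned in the docstring above: a
# `worker_init_fn` that uses `get_worker_info()` to give each worker a
# disjoint shard. The `start`/`end` attributes are hypothetical and assumed
# to exist on the (iterable) dataset.
#
#     import math
#     import torch.utils.data
#
#     def worker_init_fn(worker_id):
#         info = torch.utils.data.get_worker_info()
#         dataset = info.dataset  # the copy local to this worker process
#         overall_start, overall_end = dataset.start, dataset.end
#         per_worker = int(math.ceil(
#             (overall_end - overall_start) / float(info.num_workers)))
#         dataset.start = overall_start + info.id * per_worker
#         dataset.end = min(dataset.start + per_worker, overall_end)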
r"""Dummy class used to signal the end of an IterableDataset"""
@dataclass(frozen=True)
class _IterableDatasetStopIteration(object):
worker_id: int
r"""Dummy class used to resume the fetching when worker reuse is enabled"""
@dataclass(frozen=True)
class _ResumeIteration(object):
seed: Optional[int] = None
# The function `_generate_state` is adapted from `numpy.random.SeedSequence`
# from https://github.com/numpy/numpy/blob/main/numpy/random/bit_generator.pyx
# It's MIT licensed, here is the copyright:
# Copyright (c) 2015 Melissa E. O'Neill
# Copyright (c) 2019 NumPy Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This function generates an array of int32 as the seed for
# `numpy.random`, in order to prevent state collision due to same
# seed and algorithm for `numpy.random` and `random` modules.
# TODO: Implement `SeedSequence` like object for `torch.random`
def _generate_state(base_seed, worker_id):
INIT_A = 0x43b0d7e5
MULT_A = 0x931e8875
INIT_B = 0x8b51f9dd
MULT_B = 0x58f38ded
MIX_MULT_L = 0xca01f9dd
MIX_MULT_R = 0x4973f715
XSHIFT = 4 * 8 // 2
MASK32 = 0xFFFFFFFF
entropy = [worker_id, base_seed & MASK32, base_seed >> 32, 0]
pool = [0] * 4
hash_const_A = INIT_A
def hash(value):
nonlocal hash_const_A
value = (value ^ hash_const_A) & MASK32
hash_const_A = (hash_const_A * MULT_A) & MASK32
value = (value * hash_const_A) & MASK32
value = (value ^ (value >> XSHIFT)) & MASK32
return value
def mix(x, y):
result_x = (MIX_MULT_L * x) & MASK32
result_y = (MIX_MULT_R * y) & MASK32
result = (result_x - result_y) & MASK32
result = (result ^ (result >> XSHIFT)) & MASK32
return result
# Add in the entropy to the pool.
for i in range(len(pool)):
pool[i] = hash(entropy[i])
# Mix all bits together so late bits can affect earlier bits.
for i_src in range(len(pool)):
for i_dst in range(len(pool)):
if i_src != i_dst:
pool[i_dst] = mix(pool[i_dst], hash(pool[i_src]))
hash_const_B = INIT_B
state = []
for i_dst in range(4):
data_val = pool[i_dst]
data_val = (data_val ^ hash_const_B) & MASK32
hash_const_B = (hash_const_B * MULT_B) & MASK32
data_val = (data_val * hash_const_B) & MASK32
data_val = (data_val ^ (data_val >> XSHIFT)) & MASK32
state.append(data_val)
return state
def _worker_loop(dataset_kind, dataset, index_queue, data_queue, done_event,
auto_collation, collate_fn, drop_last, base_seed, init_fn, worker_id,
num_workers, persistent_workers, shared_seed):
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
try:
# Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
# module's handlers are executed after Python returns from C low-level
# handlers, likely when the same fatal signal had already happened
# again.
# https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers
signal_handling._set_worker_signal_handlers()
torch.set_num_threads(1)
seed = base_seed + worker_id
random.seed(seed)
torch.manual_seed(seed)
if HAS_NUMPY:
np_seed = _generate_state(base_seed, worker_id)
import numpy as np
np.random.seed(np_seed)
from torch.utils.data import IterDataPipe
from torch.utils.data.graph_settings import apply_shuffle_seed
shared_rng = torch.Generator()
if isinstance(dataset, IterDataPipe):
assert shared_seed is not None
shared_rng.manual_seed(shared_seed)
dataset = apply_shuffle_seed(dataset, shared_rng)
global _worker_info
_worker_info = WorkerInfo(id=worker_id, num_workers=num_workers,
seed=seed, dataset=dataset)
from torch.utils.data import _DatasetKind
init_exception = None
try:
if init_fn is not None:
init_fn(worker_id)
fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, auto_collation, collate_fn, drop_last)
except Exception:
init_exception = ExceptionWrapper(
where="in DataLoader worker process {}".format(worker_id))
# When using Iterable mode, some workers can exit earlier than others due
# to the IterableDataset behaving differently for different workers.
# When such things happen, an `_IterableDatasetStopIteration` object is
# sent over to the main process with the ID of this worker, so that the
# main process won't send more tasks to this worker, and will send
# `None` to this worker to properly exit it.
#
# Note that we cannot set `done_event` from a worker as it is shared
# among all processes. Instead, we set the `iteration_end` flag to
# signify that the iterator is exhausted. When either `done_event` or
# `iteration_end` is set, we skip all processing steps and just wait for
# `None`.
iteration_end = False
watchdog = ManagerWatchdog()
while watchdog.is_alive():
try:
r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
continue
if isinstance(r, _ResumeIteration):
# Acknowledge the main process
data_queue.put((r, None))
iteration_end = False
if isinstance(dataset, IterDataPipe):
assert r.seed is not None
shared_rng.manual_seed(r.seed)
dataset = apply_shuffle_seed(dataset, shared_rng)
# Recreate the fetcher for worker-reuse policy
fetcher = _DatasetKind.create_fetcher(
dataset_kind, dataset, auto_collation, collate_fn, drop_last)
continue
elif r is None:
# Received the final signal
assert done_event.is_set() or iteration_end
break
elif done_event.is_set() or iteration_end:
# `done_event` is set. But I haven't received the final signal
# (None) yet. I will keep continuing until I get it, and skip the
# processing steps.
continue
idx, index = r
data: Union[_IterableDatasetStopIteration, ExceptionWrapper]
if init_exception is not None:
data = init_exception
init_exception = None
else:
try:
data = fetcher.fetch(index)
except Exception as e:
if isinstance(e, StopIteration) and dataset_kind == _DatasetKind.Iterable:
data = _IterableDatasetStopIteration(worker_id)
# Set `iteration_end`
# (1) to save future `next(...)` calls, and
# (2) to avoid sending multiple `_IterableDatasetStopIteration`s.
iteration_end = True
else:
# It is important that we don't store exc_info in a variable.
# `ExceptionWrapper` does the correct thing.
# See NOTE [ Python Traceback Reference Cycle Problem ]
data = ExceptionWrapper(
where="in DataLoader worker process {}".format(worker_id))
data_queue.put((idx, data))
del data, idx, index, r # save memory
except KeyboardInterrupt:
# Main process will raise KeyboardInterrupt anyways.
pass
if done_event.is_set():
data_queue.cancel_join_thread()
data_queue.close()
|
pytorch-master
|
torch/utils/data/_utils/worker.py
|
r""""Contains definitions of the methods used by the _BaseDataLoaderIter workers to
collate samples fetched from dataset into Tensor(s).
These **needs** to be in global scope since Py2 doesn't support serializing
static methods.
`default_collate` and `default_convert` are exposed to users via 'dataloader.py'.
"""
import torch
import re
import collections
from torch._six import string_classes
np_str_obj_array_pattern = re.compile(r'[SaUO]')
def default_convert(data):
r"""
Function that converts each NumPy array element into a :class:`torch.Tensor`. If the input is a `Sequence`,
`Collection`, or `Mapping`, it tries to convert each element inside to a :class:`torch.Tensor`.
    If the input is not a NumPy array, it is left unchanged.
This is used as the default function for collation when both `batch_sampler` and
`batch_size` are NOT defined in :class:`~torch.utils.data.DataLoader`.
The general input type to output type mapping is similar to that
of :func:`~torch.utils.data.default_collate`. See the description there for more details.
Args:
data: a single data point to be converted
Examples:
>>> # Example with `int`
>>> default_convert(0)
0
>>> # Example with NumPy array
>>> # xdoctest: +SKIP
>>> default_convert(np.array([0, 1]))
tensor([0, 1])
>>> # Example with NamedTuple
>>> Point = namedtuple('Point', ['x', 'y'])
>>> default_convert(Point(0, 0))
Point(x=0, y=0)
>>> default_convert(Point(np.array(0), np.array(0)))
Point(x=tensor(0), y=tensor(0))
>>> # Example with List
>>> default_convert([np.array([0, 1]), np.array([2, 3])])
[tensor([0, 1]), tensor([2, 3])]
"""
elem_type = type(data)
if isinstance(data, torch.Tensor):
return data
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
# array of string classes and object
if elem_type.__name__ == 'ndarray' \
and np_str_obj_array_pattern.search(data.dtype.str) is not None:
return data
return torch.as_tensor(data)
elif isinstance(data, collections.abc.Mapping):
try:
return elem_type({key: default_convert(data[key]) for key in data})
except TypeError:
# The mapping type may not support `__init__(iterable)`.
return {key: default_convert(data[key]) for key in data}
elif isinstance(data, tuple) and hasattr(data, '_fields'): # namedtuple
return elem_type(*(default_convert(d) for d in data))
elif isinstance(data, tuple):
return [default_convert(d) for d in data] # Backwards compatibility.
elif isinstance(data, collections.abc.Sequence) and not isinstance(data, string_classes):
try:
return elem_type([default_convert(d) for d in data])
except TypeError:
# The sequence type may not support `__init__(iterable)` (e.g., `range`).
return [default_convert(d) for d in data]
else:
return data
default_collate_err_msg_format = (
"default_collate: batch must contain tensors, numpy arrays, numbers, "
"dicts or lists; found {}")
def default_collate(batch):
r"""
Function that takes in a batch of data and puts the elements within the batch
into a tensor with an additional outer dimension - batch size. The exact output type can be
a :class:`torch.Tensor`, a `Sequence` of :class:`torch.Tensor`, a
Collection of :class:`torch.Tensor`, or left unchanged, depending on the input type.
This is used as the default function for collation when
`batch_size` or `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`.
Here is the general input type (based on the type of the element within the batch) to output type mapping:
* :class:`torch.Tensor` -> :class:`torch.Tensor` (with an added outer dimension batch size)
* NumPy Arrays -> :class:`torch.Tensor`
* `float` -> :class:`torch.Tensor`
* `int` -> :class:`torch.Tensor`
* `str` -> `str` (unchanged)
* `bytes` -> `bytes` (unchanged)
* `Mapping[K, V_i]` -> `Mapping[K, default_collate([V_1, V_2, ...])]`
* `NamedTuple[V1_i, V2_i, ...]` -> `NamedTuple[default_collate([V1_1, V1_2, ...]),
default_collate([V2_1, V2_2, ...]), ...]`
* `Sequence[V1_i, V2_i, ...]` -> `Sequence[default_collate([V1_1, V1_2, ...]),
default_collate([V2_1, V2_2, ...]), ...]`
Args:
batch: a single batch to be collated
Examples:
>>> # Example with a batch of `int`s:
>>> default_collate([0, 1, 2, 3])
tensor([0, 1, 2, 3])
>>> # Example with a batch of `str`s:
>>> default_collate(['a', 'b', 'c'])
['a', 'b', 'c']
>>> # Example with `Map` inside the batch:
>>> default_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])
{'A': tensor([ 0, 100]), 'B': tensor([ 1, 100])}
>>> # Example with `NamedTuple` inside the batch:
>>> # xdoctest: +SKIP
>>> Point = namedtuple('Point', ['x', 'y'])
>>> default_collate([Point(0, 0), Point(1, 1)])
Point(x=tensor([0, 1]), y=tensor([0, 1]))
>>> # Example with `Tuple` inside the batch:
>>> default_collate([(0, 1), (2, 3)])
[tensor([0, 2]), tensor([1, 3])]
>>> # Example with `List` inside the batch:
>>> default_collate([[0, 1], [2, 3]])
[tensor([0, 2]), tensor([1, 3])]
"""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum(x.numel() for x in batch)
storage = elem.storage()._new_shared(numel, device=elem.device)
out = elem.new(storage).resize_(len(batch), *list(elem.size()))
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(default_collate_err_msg_format.format(elem.dtype))
return default_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, collections.abc.Mapping):
try:
return elem_type({key: default_collate([d[key] for d in batch]) for key in elem})
except TypeError:
# The mapping type may not support `__init__(iterable)`.
return {key: default_collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(default_collate(samples) for samples in zip(*batch)))
elif isinstance(elem, collections.abc.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError('each element in list of batch should be of equal size')
transposed = list(zip(*batch)) # It may be accessed twice, so we use a list.
if isinstance(elem, tuple):
return [default_collate(samples) for samples in transposed] # Backwards compatibility.
else:
try:
return elem_type([default_collate(samples) for samples in transposed])
except TypeError:
# The sequence type may not support `__init__(iterable)` (e.g., `range`).
return [default_collate(samples) for samples in transposed]
raise TypeError(default_collate_err_msg_format.format(elem_type))
|
pytorch-master
|
torch/utils/data/_utils/collate.py
|
r""""Contains definitions of the methods used by the _BaseDataLoaderIter to put
fetched tensors into pinned memory.
These **needs** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import collections
import queue
import torch
from torch._six import string_classes
from . import MP_STATUS_CHECK_INTERVAL
from torch._utils import ExceptionWrapper
def _pin_memory_loop(in_queue, out_queue, device_id, done_event, device):
# This setting is thread local, and prevents the copy in pin_memory from
# consuming all CPU cores.
torch.set_num_threads(1)
torch.cuda.set_device(device_id)
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
while not done_event.is_set():
try:
r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
continue
idx, data = r
if not done_event.is_set() and not isinstance(data, ExceptionWrapper):
try:
data = pin_memory(data, device)
except Exception:
data = ExceptionWrapper(
where="in pin memory thread for device {}".format(device_id))
r = (idx, data)
while not done_event.is_set():
try:
out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL)
break
except queue.Full:
continue
del r # save memory
def pin_memory(data, device=None):
if isinstance(data, torch.Tensor):
return data.pin_memory(device)
elif isinstance(data, string_classes):
return data
elif isinstance(data, collections.abc.Mapping):
try:
return type(data)({k: pin_memory(sample, device) for k, sample in data.items()}) # type: ignore[call-arg]
except TypeError:
# The mapping type may not support `__init__(iterable)`.
return {k: pin_memory(sample, device) for k, sample in data.items()}
elif isinstance(data, tuple) and hasattr(data, '_fields'): # namedtuple
return type(data)(*(pin_memory(sample, device) for sample in data))
elif isinstance(data, tuple):
return [pin_memory(sample, device) for sample in data] # Backwards compatibility.
elif isinstance(data, collections.abc.Sequence):
try:
return type(data)([pin_memory(sample, device) for sample in data]) # type: ignore[call-arg]
except TypeError:
# The sequence type may not support `__init__(iterable)` (e.g., `range`).
return [pin_memory(sample, device) for sample in data]
elif hasattr(data, "pin_memory"):
return data.pin_memory()
else:
return data
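# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch appended for this dump; not part of the
# original file. `pin_memory` (defined above) recurses through nested
# containers and pins every tensor it finds. Pinning requires CUDA, so the
# demo is skipped on CPU-only builds; the sample structure is made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    if torch.cuda.is_available():
        sample = {"image": torch.randn(3, 4), "labels": [torch.tensor(1), torch.tensor(2)]}
        pinned = pin_memory(sample)
        print(pinned["image"].is_pinned())      # True
        print(pinned["labels"][0].is_pinned())  # True
    else:
        print("CUDA not available; skipping pin_memory demo.")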
|
pytorch-master
|
torch/utils/data/_utils/pin_memory.py
|
r"""Utility classes & functions for data loading. Code in this folder is mostly
used by ../dataloader.py.
A lot of multiprocessing is used in data loading, which only supports running
functions defined in global environment (py2 can't serialize static methods).
Therefore, for code tidiness we put these functions into different files in this
folder.
"""
import sys
import atexit
# old private location of the ExceptionWrapper that some users rely on:
from torch._utils import ExceptionWrapper
IS_WINDOWS = sys.platform == "win32"
MP_STATUS_CHECK_INTERVAL = 5.0
r"""Interval (in seconds) to check status of processes to avoid hanging in
multiprocessing data loading. This is mainly used in getting data from
another process, in which case we need to periodically check whether the
sender is alive to prevent hanging."""
python_exit_status = False
r"""Whether Python is shutting down. This flag is guaranteed to be set before
the Python core library resources are freed, but Python may already be exiting
for some time when this is set.
Hook to set this flag is `_set_python_exit_flag`, and is inspired by a similar
hook in Python 3.7 multiprocessing library:
https://github.com/python/cpython/blob/d4d60134b29290049e28df54f23493de4f1824b6/Lib/multiprocessing/util.py#L277-L327
"""
DATAPIPE_SHARED_SEED = "_dl_shared_seed"
r"""The key to share the same seed for shuffle DataPipe across distributed processes"""
DATAPIPE_SHARED_SEED_COUNTER = "_dl_shared_seed_recv_cnt"
r"""The key to count the number of distributed processes that have received the shared seed"""
DATAPIPE_SHARED_SEED_DEFAULT_TIMEOUT = 30 * 60
r"""Timeout (in seconds) sending the shared seed from Rank 0 and sending
the signal of the shared seed received from other Ranks.
It uses the same default timeout for the distributed process group"""
DATAPIPE_SHARED_SEED_CHECK_INTERVAL = 0.01
r"""Interval to check if each rank has received the shared seed"""
try:
import numpy
HAS_NUMPY = True
except ModuleNotFoundError:
HAS_NUMPY = False
def _set_python_exit_flag():
global python_exit_status
python_exit_status = True
atexit.register(_set_python_exit_flag)
from . import worker, signal_handling, pin_memory, collate, fetch
|
pytorch-master
|
torch/utils/data/_utils/__init__.py
|
r""""Signal handling for multiprocessing data loading.
NOTE [ Signal handling in multiprocessing data loading ]
In cases like DataLoader, if a worker process dies due to bus error/segfault
or just hangs, the main process will hang waiting for data. This is difficult
to avoid on the PyTorch side, as it can be caused by limited shm or other
libraries users call in the workers. In this file and `DataLoader.cpp`, we make
our best effort to provide some error message to users when such unfortunate
events happen.
When a _BaseDataLoaderIter starts worker processes, their pids are registered in a
map defined in `DataLoader.cpp`: id(_BaseDataLoaderIter) => Collection[ Worker pids ]
via `_set_worker_pids`.
When an error happens in a worker process, the main process receives a SIGCHLD,
and Python will eventually call the handler registered below
(in `_set_SIGCHLD_handler`). In the handler, the `_error_if_any_worker_fails`
call checks all registered worker pids and raises a proper error message to
prevent the main process from hanging while waiting for data from a worker.
Additionally, at the beginning of each worker's `_utils.worker._worker_loop`,
`_set_worker_signal_handlers` is called to register critical signal handlers
(e.g., for SIGSEGV, SIGBUS, SIGFPE, SIGTERM) in C, which just prints an error
message to stderr before triggering the default handler. So a message will also
be printed from the worker process when it is killed by such signals.
See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for the reasoning of
this signal handling design and the other mechanisms we implement to make our
multiprocessing data loading robust to errors.
"""
import signal
import threading
from . import IS_WINDOWS
# Some of the following imported functions are not used in this file, but are
# meant to be used as `_utils.signal_handling.XXXXX`.
from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401
from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401
_SIGCHLD_handler_set = False
r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
def _set_SIGCHLD_handler():
# Windows doesn't support SIGCHLD handler
if IS_WINDOWS:
return
# can't set signal in child threads
if not isinstance(threading.current_thread(), threading._MainThread): # type: ignore[attr-defined]
return
global _SIGCHLD_handler_set
if _SIGCHLD_handler_set:
return
previous_handler = signal.getsignal(signal.SIGCHLD)
if not callable(previous_handler):
# This doesn't catch default handler, but SIGCHLD default handler is a
# no-op.
previous_handler = None
def handler(signum, frame):
# This following call uses `waitid` with WNOHANG from C side. Therefore,
# Python can still get and update the process status successfully.
_error_if_any_worker_fails()
if previous_handler is not None:
assert callable(previous_handler)
previous_handler(signum, frame)
signal.signal(signal.SIGCHLD, handler)
_SIGCHLD_handler_set = True
|
pytorch-master
|
torch/utils/data/_utils/signal_handling.py
|
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
DILL_AVAILABLE = True
except ImportError:
DILL_AVAILABLE = False
|
pytorch-master
|
torch/utils/data/_utils/serialization.py
|
import threading
import time
class LocalQueue():
ops = 0
stored = 0
uid = 0
empty = 0
def __init__(self, name='unnamed'):
self.items = []
self.name = name
self.uid = LocalQueue.uid
LocalQueue.uid += 1
def put(self, item, block=True):
LocalQueue.ops += 1
LocalQueue.stored += 1
self.items.append(item)
def get(self, block=True, timeout=0):
# TODO(VitalyFedyunin): Add support of block and timeout arguments
LocalQueue.ops += 1
if not len(self.items):
LocalQueue.empty += 1
raise Exception('LocalQueue is empty')
LocalQueue.stored -= 1
return self.items.pop()
class ThreadingQueue():
def __init__(self, name='unnamed'):
self.lock = threading.Lock()
self.items = []
self.name = name
def put(self, item, block=True):
with self.lock:
self.items.append(item)
def get(self, block=True, timeout=0):
# TODO(VitalyFedyunin): Add support of block and timeout arguments
while True:
with self.lock:
if len(self.items) > 0:
return self.items.pop()
if not block:
raise Exception("Not available")
# TODO(VitalyFedyunin): Figure out what to do if nothing in the queue
time.sleep(0.000001)
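# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch appended for this dump; not part of the
# original file. Both queues append on put() and pop() from the end, so they
# behave as LIFO stacks; since the communication protocols keep at most one
# outstanding message per queue, ordering does not matter in practice.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    q = LocalQueue(name="demo")
    q.put("first")
    q.put("second")
    print(q.get())   # 'second' (popped from the end)
    tq = ThreadingQueue(name="demo-threaded")
    tq.put(42)
    print(tq.get())  # 42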
|
pytorch-master
|
torch/utils/data/communication/queue.py
|
import time
import types
from torch.utils.data import IterDataPipe, communication
DEFAULT_NON_BLOCKING_SLEEP = 0.001
__all__ = [
"DataPipeBehindQueues",
"EnsureNonBlockingDataPipe",
"InvalidStateResetRequired",
"NonBlocking",
"NotAvailable",
"QueueWrapper",
"default_not_available_hook",
]
def default_not_available_hook():
time.sleep(DEFAULT_NON_BLOCKING_SLEEP)
class NotAvailable(Exception):
pass
class InvalidStateResetRequired(Exception):
"""
    Returned by a DataPipe when it is expecting to get a reset request,
    for example a RouterDataPipe expecting all workers to request a reset.
"""
pass
class NonBlocking(IterDataPipe):
not_available_hook = default_not_available_hook
def __iter__(self):
self.reset_iterator()
return self
def __next__(self):
while True:
try:
return self.nonblocking_next()
except StopIteration:
raise StopIteration
except NotAvailable:
if NonBlocking.not_available_hook is not None:
NonBlocking.not_available_hook()
def nonblocking_next(self):
raise NotImplementedError(
"nonblocking_next is not implemented for %s" % self.__class__)
def reset_iterator(self):
raise NotImplementedError(
"reset_iterator is not implemented for %s" % self.__class__)
@staticmethod
def register_not_available_hook(hook_function):
NonBlocking.not_available_hook = hook_function
def EnsureNonBlockingDataPipe(validated_datapipe):
if not isinstance(validated_datapipe, IterDataPipe):
raise Exception('Not Iterable DataPipe ' +
str(validated_datapipe.__class__))
if isinstance(validated_datapipe, NonBlocking):
return validated_datapipe
if not hasattr(validated_datapipe, '_as_iterator'):
validated_datapipe._as_iterator = None # type: ignore[attr-defined]
if not hasattr(validated_datapipe, 'nonblocking_next'):
def nonblocking_next(self):
if self._as_iterator is None:
self._as_iterator = iter(self)
return next(self._as_iterator)
validated_datapipe.nonblocking_next = types.MethodType( # type: ignore[attr-defined]
nonblocking_next, validated_datapipe)
if not hasattr(validated_datapipe, 'reset_iterator'):
def reset_iterator(self):
self._as_iterator = None
validated_datapipe.reset_iterator = types.MethodType( # type: ignore[attr-defined]
reset_iterator, validated_datapipe)
return validated_datapipe
def DataPipeBehindQueues(source_datapipe, protocol, full_stop=False, blocking_request_get=False):
"""
    Indefinitely iterates over the req_queue and passes values from source_datapipe to the res_queue.
    If full_stop is True, the loop terminates once StopIteration is received from the source_datapipe.
"""
if not isinstance(protocol, communication.protocol.IterDataPipeQueueProtocolServer):
raise Exception('Expecting IterDataPipeQueueProtocolServer, got', protocol)
source_datapipe = EnsureNonBlockingDataPipe(source_datapipe)
forever = True
while forever:
try:
# Non-blocking call is Extremely slow here for python.mp, need to figure out a good workaround
request = protocol.get_new_request(block=blocking_request_get)
except communication.protocol.EmptyQueue:
yield True
continue
if isinstance(request, communication.messages.ResetIteratorRequest):
source_datapipe.reset_iterator()
protocol.response_reset_iterator()
elif isinstance(request, communication.messages.TerminateRequest):
forever = False
protocol.response_terminate()
elif isinstance(request, communication.messages.GetNextRequest):
while forever:
try:
value = source_datapipe.nonblocking_next()
except NotAvailable:
yield True
continue
except StopIteration:
protocol.response_stop_iteration()
if full_stop:
forever = False
else:
yield True
break
except InvalidStateResetRequired:
protocol.response_invalid_state()
if full_stop:
forever = False
else:
yield True
break
protocol.response_next(value)
yield True # Returns control
break
else:
raise Exception('Unrecognized type of request received', request)
class QueueWrapper(NonBlocking):
"""
    Creates an iter.DataPipe which reads data from the DataLoader.Queue
"""
def __init__(self, protocol, response_wait_time=0.00001):
if not isinstance(protocol, communication.protocol.IterDataPipeQueueProtocolClient):
raise Exception('Got', protocol)
self.protocol = protocol
self.counter = 0
self._stop_iteration = False
self._response_wait_time = response_wait_time
def reset_iterator(self):
self._stop_iteration = False
self.counter = 0
self.protocol.request_reset_iterator()
while True:
try:
self.protocol.get_response_reset_iterator()
break
except communication.protocol.EmptyQueue:
if NonBlocking.not_available_hook is not None:
NonBlocking.not_available_hook()
def nonblocking_next(self):
if self._stop_iteration:
raise Exception(
'`next` or `nonblocking_next` called after receiving StopIteration')
if self.protocol.can_take_request():
self.protocol.request_next()
try:
response = self.protocol.get_response_next(block=True, timeout=self._response_wait_time)
except communication.protocol.EmptyQueue:
raise NotAvailable
if isinstance(response, communication.messages.StopIterationResponse):
self._stop_iteration = True
raise StopIteration
if isinstance(response, communication.messages.InvalidStateResponse):
raise NotAvailable
return response.value
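# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch appended for this dump; not part of the
# original file. It drives `DataPipeBehindQueues` by hand in a single thread:
# the client puts one request, one `next()` on the server generator serves it,
# and the client then reads the response. Assumes `IterableWrapper` and
# `LocalQueue` are importable at the paths used elsewhere in this source tree.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.utils.data.communication.queue import LocalQueue
    from torch.utils.data.datapipes.iter import IterableWrapper
    req_queue, res_queue = LocalQueue(), LocalQueue()
    server = DataPipeBehindQueues(
        IterableWrapper([10, 20, 30]),
        communication.protocol.IterDataPipeQueueProtocolServer(req_queue, res_queue))
    client = communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue)
    client.request_reset_iterator()
    next(server)  # the server consumes the reset request and responds
    client.get_response_reset_iterator(block=True)
    values = []
    while True:
        client.request_next()
        next(server)  # the server fetches one item (or StopIteration) and responds
        response = client.get_response_next(block=True)
        if isinstance(response, communication.messages.StopIterationResponse):
            break
        values.append(response.value)
    print(values)  # [10, 20, 30]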
|
pytorch-master
|
torch/utils/data/communication/iter.py
|
from torch.utils.data import communication
class Protocol(object):
__slots__ = ('request_queue', 'response_queue')
def __init__(self, request_queue, response_queue):
self.request_queue = request_queue
self.response_queue = response_queue
class ProtocolClient(Protocol):
"""
ProtocolClient takes charge of putting requests into req_queue and returning results from res_queue.
"""
_req_sent = None
def __init__(self, request_queue, response_queue):
self.request_queue = request_queue
self.response_queue = response_queue
self._req_sent = None
def can_take_request(self):
return self._req_sent is None
def waiting_for_response(self):
return self._req_sent is not None
def request_sent(self, request=True):
if not self.can_take_request():
raise Exception('Protocol only supports one request in the Queue')
self._req_sent = request
def request_served(self, result=None):
if not self.waiting_for_response():
raise Exception(
                'Expected no pending requests, but something got served', result)
self._req_sent = None
class ProtocolServer(Protocol):
"""
ProtocolServer takes charge of getting requests from req_queue and fetching data from source datapipe.
"""
_req_received = None
def __init__(self, request_queue, response_queue):
self.request_queue = request_queue
self.response_queue = response_queue
self._req_received = None
def have_pending_request(self):
return self._req_received is not None
def get_new_request(self, block=False):
if self.have_pending_request():
raise Exception(
'Trying to get next request, while having one unserved')
try:
response = self.request_queue.get(block=block)
except Exception as e: # TODO: Catch only timeout exceptions
raise EmptyQueue('queue is empty')
self._req_received = response
return response
# TODO: Validate supported requests
def response_terminate(self):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
if not isinstance(self._req_received, communication.messages.TerminateRequest):
raise Exception(
"Replaying with terminate status to other type of message")
self.response_queue.put(communication.messages.TerminateResponse())
self._req_received = None
class MapDataPipeQueueProtocolServer(ProtocolServer):
def response_item(self, key, value):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.GetItemResponse(key, value))
self._req_received = None
def response_len(self, size):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.LenResponse(size))
self._req_received = None
def response_index_out_of_bound(self):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.StopIterationResponse())
self._req_received = None
class MapDataPipeQueueProtocolClient(ProtocolClient):
def request_len(self):
if not self.can_take_request():
            raise Exception('Cannot request len while we are still waiting for a response to the previous request')
request = communication.messages.LenRequest()
self.request_queue.put(request)
self.request_sent(request)
def request_item(self, index):
if not self.can_take_request():
            raise Exception('Cannot request item while we are still waiting for a response to the previous request')
request = communication.messages.GetItemRequest(index)
self.request_queue.put(request)
self.request_sent(request)
def get_response_len(self, block=False, timeout=None):
if not self.waiting_for_response():
raise Exception('Can not expect any response without submitted request')
try:
response = self.response_queue.get(block=block, timeout=timeout)
except TimeoutError:
raise EmptyQueue('queue is empty')
self.request_served(response)
if not isinstance(response, communication.messages.LenResponse):
raise Exception('Invalid response received')
return response
def get_response_item(self, block=False, timeout=None):
if not self.waiting_for_response():
raise Exception('Can not expect any response without submitted request')
try:
response = self.response_queue.get(block=block, timeout=timeout)
except TimeoutError:
raise EmptyQueue('queue is empty')
self.request_served(response)
# if not isinstance(response, communication.messages.GetItemResponse):
# raise Exception('Invalid response received')
return response
class EmptyQueue(Exception):
pass
class IterDataPipeQueueProtocolServer(ProtocolServer):
def response_reset_iterator(self):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
if not isinstance(self._req_received, communication.messages.ResetIteratorRequest):
raise Exception(
"Replaying with reset status to other type of message")
self.response_queue.put(communication.messages.ResetIteratorResponse())
self._req_received = None
def response_next(self, value):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.GetNextResponse(value))
self._req_received = None
def response_stop_iteration(self):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.StopIterationResponse())
self._req_received = None
def response_invalid_state(self):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.InvalidStateResponse())
self._req_received = None
class IterDataPipeQueueProtocolClient(ProtocolClient):
def request_reset_iterator(self):
if not self.can_take_request():
            raise Exception('Cannot reset while we are still waiting for a response to the previous request')
request = communication.messages.ResetIteratorRequest()
self.request_queue.put(request)
self.request_sent(request)
def request_next(self):
if not self.can_take_request():
            raise Exception('Cannot request the next item while we are still waiting for a response to the previous request')
request = communication.messages.GetNextRequest()
self.request_queue.put(request)
self.request_sent(request)
def get_response_reset_iterator(self, block=False):
try:
response = self.response_queue.get(block=block)
except Exception as e: # TODO: Catch only timeout exceptions
raise EmptyQueue('queue is empty')
self.request_served(response)
if not isinstance(response, communication.messages.ResetIteratorResponse):
raise Exception('Invalid response received')
def get_response_next(self, block=False, timeout=None):
if not self.waiting_for_response():
raise Exception(
'Can not expect any response without submitted request')
try:
response = self.response_queue.get(block=block, timeout=timeout)
except Exception as e: # TODO: Catch only timeout exceptions
raise EmptyQueue('queue is empty')
self.request_served(response)
# TODO(VitalyFedyunin): Add possible response types validation here
return response
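# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch appended for this dump; not part of the
# original file. It shows the one-outstanding-request bookkeeping enforced by
# `ProtocolClient`: a second request cannot be sent until the response to the
# previous one has been consumed. Assumes `LocalQueue` is importable from the
# sibling queue module shown in this dump.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.utils.data.communication.queue import LocalQueue
    client = IterDataPipeQueueProtocolClient(LocalQueue(), LocalQueue())
    print(client.can_take_request())      # True: nothing outstanding yet
    client.request_next()                 # puts a GetNextRequest on the request queue
    print(client.can_take_request())      # False: one request is now in flight
    print(client.waiting_for_response())  # True
    try:
        client.request_next()             # a second request is rejected
    except Exception as exc:
        print(exc)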
|
pytorch-master
|
torch/utils/data/communication/protocol.py
|
from . import eventloop
from . import iter
from . import map
from . import messages
from . import protocol
from . import queue
|
pytorch-master
|
torch/utils/data/communication/__init__.py
|
import torch
import threading
import pickle
from torch.utils.data import IterDataPipe, communication, MapDataPipe
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
__all__ = [
"DataPipeToQueuesLoop",
"SpawnProcessForDataPipeline",
"SpawnThreadForDataPipeline",
]
def DataPipeToQueuesLoop(source_datapipe, req_queue, res_queue):
if isinstance(source_datapipe, IterDataPipe):
pipe_type = communication.iter
protocol_type = communication.protocol.IterDataPipeQueueProtocolServer
elif isinstance(source_datapipe, MapDataPipe):
pipe_type = communication.map # type: ignore[misc]
protocol_type = communication.protocol.MapDataPipeQueueProtocolServer # type: ignore[assignment]
else:
raise Exception('Only supports IterDataPipe or MapDataPipe, got', source_datapipe)
torch.set_num_threads(1)
for _ in pipe_type.DataPipeBehindQueues(source_datapipe, protocol_type(req_queue, res_queue),
blocking_request_get=True):
pass
def SpawnProcessForDataPipeline(multiprocessing_ctx, datapipe):
req_queue = multiprocessing_ctx.Queue()
res_queue = multiprocessing_ctx.Queue()
process = multiprocessing_ctx.Process(
target=DataPipeToQueuesLoop, args=(datapipe, req_queue, res_queue))
return process, req_queue, res_queue
def SpawnThreadForDataPipeline(datapipe):
r"""
Given a DataPipe, creates a copy of the DataPipe, starts a new Thread with DataPipeToQueuesLoop as target,
    and returns the thread, req_queue, res_queue, and the thread-local copy of the DataPipe.
"""
req_queue = communication.queue.ThreadingQueue()
res_queue = communication.queue.ThreadingQueue()
try:
new_datapipe = pickle.loads(pickle.dumps(datapipe))
except Exception as pe:
if HAS_DILL:
try:
new_datapipe = dill.loads(dill.dumps(datapipe))
except Exception as de:
raise Exception('Unable to dill DataPipe to make thread local copy', de)
else:
raise Exception('Unable to pickle DataPipe to make thread local copy (consider installing `dill`)', pe)
process = threading.Thread(target=DataPipeToQueuesLoop, args=(
new_datapipe, req_queue, res_queue), daemon=True)
return process, req_queue, res_queue, new_datapipe
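# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch appended for this dump; not part of the
# original file. It serves an IterDataPipe from a background thread created by
# `SpawnThreadForDataPipeline` and consumes it through a `QueueWrapper`, then
# asks the serving loop to terminate. Assumes `IterableWrapper` is importable
# at the path used elsewhere in this source tree.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.utils.data.datapipes.iter import IterableWrapper
    source = IterableWrapper(range(5))
    thread, req_queue, res_queue, _local_copy = SpawnThreadForDataPipeline(source)
    thread.start()
    remote = communication.iter.QueueWrapper(
        communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue))
    print(list(remote))  # [0, 1, 2, 3, 4]
    # Shut the serving loop down and wait for the daemon thread to exit.
    req_queue.put(communication.messages.TerminateRequest())
    thread.join(timeout=5)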
|
pytorch-master
|
torch/utils/data/communication/eventloop.py
|
import time
import types
from torch.utils.data import communication, MapDataPipe
DEFAULT_NON_BLOCKING_SLEEP = 0.001
__all__ = [
"DataPipeBehindQueues",
"EnsureNonBlockingMapDataPipe",
"NonBlockingMap",
"NotAvailable",
"QueueWrapperForMap",
"default_not_available_hook",
]
def default_not_available_hook():
time.sleep(DEFAULT_NON_BLOCKING_SLEEP)
class NotAvailable(Exception):
pass
class NonBlockingMap(MapDataPipe):
not_available_hook = default_not_available_hook
def __getitem__(self, index):
while True:
try:
return self.nonblocking_getitem(index)
except NotAvailable:
if NonBlockingMap.not_available_hook is not None:
NonBlockingMap.not_available_hook()
def __len__(self):
try:
return self.nonblocking_len()
except NotAvailable:
if NonBlockingMap.not_available_hook is not None:
NonBlockingMap.not_available_hook()
def nonblocking_len(self):
raise NotImplementedError(
"nonblocking_len is not implemented for %s" % self.__class__)
def nonblocking_getitem(self, index):
raise NotImplementedError(
"nonblocking_getitem is not implemented for %s" % self.__class__)
@staticmethod
def register_not_available_hook(hook_function):
NonBlockingMap.not_available_hook = hook_function
def EnsureNonBlockingMapDataPipe(validated_datapipe):
if not isinstance(validated_datapipe, MapDataPipe):
raise Exception(f'Not Map DataPipe - got {validated_datapipe.__class__}')
if isinstance(validated_datapipe, NonBlockingMap):
return validated_datapipe
if not hasattr(validated_datapipe, 'nonblocking_len'):
def nonblocking_len(self):
return self.__len__()
validated_datapipe.nonblocking_len = types.MethodType( # type: ignore[attr-defined]
nonblocking_len, validated_datapipe)
if not hasattr(validated_datapipe, 'nonblocking_getitem'):
def nonblocking_getitem(self, index):
return self.__getitem__(index)
validated_datapipe.nonblocking_getitem = types.MethodType( # type: ignore[attr-defined]
nonblocking_getitem, validated_datapipe)
return validated_datapipe
def DataPipeBehindQueues(source_datapipe, protocol, full_stop=False, blocking_request_get=False):
"""
    Indefinitely iterates over the req_queue and passes values from source_datapipe to the res_queue.
    If full_stop is True, the loop terminates once the source_datapipe reports an out-of-bound index.
"""
if not isinstance(protocol, communication.protocol.MapDataPipeQueueProtocolServer):
raise Exception('Expecting MapDataPipeQueueProtocolServer, got', protocol)
source_datapipe = EnsureNonBlockingMapDataPipe(source_datapipe)
forever = True
while forever:
try:
# Non-blocking call is Extremely slow here for python.mp, need to figure out a good workaround
request = protocol.get_new_request(block=blocking_request_get)
except communication.protocol.EmptyQueue:
yield True
continue
if isinstance(request, communication.messages.TerminateRequest):
forever = False
protocol.response_terminate()
elif isinstance(request, communication.messages.LenRequest):
size = source_datapipe.nonblocking_len()
protocol.response_len(size)
elif isinstance(request, communication.messages.GetItemRequest):
while forever:
try:
value = source_datapipe.nonblocking_getitem(request.key)
except NotAvailable:
yield True
continue
except IndexError as e:
# Alternatively, we can just allow the underlying DataPipe to throw an exception?
protocol.response_index_out_of_bound()
if full_stop:
forever = False
else:
yield True
break
protocol.response_item(request.key, value)
yield True # Returns control
break
else:
raise Exception('Unrecognized type of request received', request)
class QueueWrapperForMap(NonBlockingMap):
"""
    Creates a map.DataPipe which reads data from the DataLoader.Queue
"""
def __init__(self, protocol, response_wait_time=0.00001):
if not isinstance(protocol, communication.protocol.MapDataPipeQueueProtocolClient):
raise Exception('Got', protocol)
self.protocol = protocol
self.counter = 0
self._stop_iteration = False
self._response_wait_time = response_wait_time
def nonblocking_getitem(self, index):
if self._stop_iteration:
raise Exception(
'`getitem` or `nonblocking_getitem` called after receiving StopIteration')
if self.protocol.can_take_request():
self.protocol.request_item(index)
try:
response = self.protocol.get_response_item(block=True, timeout=self._response_wait_time)
except communication.protocol.EmptyQueue:
raise NotAvailable
if isinstance(response, communication.messages.StopIterationResponse):
self._stop_iteration = True
raise IndexError(f"Index {index} is out of bound.")
return response.key, response.value
def nonblocking_len(self):
if self._stop_iteration:
raise Exception(
'`len` or `nonblocking_len` called after receiving StopIteration')
if self.protocol.can_take_request():
self.protocol.request_len()
try:
response = self.protocol.get_response_len(block=True, timeout=self._response_wait_time)
except communication.protocol.EmptyQueue:
raise NotAvailable
return response.len
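# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch appended for this dump; not part of the
# original file. It drives the map-style `DataPipeBehindQueues` by hand in a
# single thread: each client request is followed by one `next()` on the server
# generator, after which the response is read. Assumes `SequenceWrapper` and
# `LocalQueue` are importable at the paths used elsewhere in this source tree.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.utils.data.communication.queue import LocalQueue
    from torch.utils.data.datapipes.map import SequenceWrapper
    req_queue, res_queue = LocalQueue(), LocalQueue()
    server = DataPipeBehindQueues(
        SequenceWrapper(["a", "b", "c"]),
        communication.protocol.MapDataPipeQueueProtocolServer(req_queue, res_queue))
    client = communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue)
    client.request_len()
    next(server)  # the server answers the length request
    print(client.get_response_len(block=True).len)  # 3
    client.request_item(1)
    next(server)  # the server fetches index 1 and responds
    item = client.get_response_item(block=True)
    print(item.key, item.value)  # 1 b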
|
pytorch-master
|
torch/utils/data/communication/map.py
|
class DataLoaderQueueMessage(object):
pass
class Request(DataLoaderQueueMessage):
pass
class Response(DataLoaderQueueMessage):
pass
class ResetIteratorRequest(Request):
pass
class ResetIteratorResponse(Response):
pass
class TerminateRequest(Request):
pass
class TerminateResponse(Response):
pass
class LenRequest(Request):
pass
class LenResponse(Response):
    __slots__ = ('len',)
def __init__(self, len):
self.len = len
class GetItemRequest(Request):
    __slots__ = ('key',)
def __init__(self, key):
self.key = key
class GetItemResponse(Response):
__slots__ = ('key', 'value')
def __init__(self, key, value):
self.key = key
self.value = value
class GetNextRequest(Request):
pass
class GetNextResponse(Response):
    __slots__ = ('value',)
def __init__(self, value):
self.value = value
class StopIterationResponse(Response):
pass
class InvalidStateResponse(Response):
"""
    Returned by a DataPipe when it is expecting to get a reset request,
    for example a RouterDataPipe expecting all workers to request a reset.
"""
pass
|
pytorch-master
|
torch/utils/data/communication/messages.py
|
import inspect
from functools import wraps
from typing import Any, Callable, Optional, Type, Union, get_type_hints
from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe
from torch.utils.data.datapipes._typing import _DataPipeMeta
######################################################
# Functional API
######################################################
class functional_datapipe(object):
name: str
def __init__(self, name: str, enable_df_api_tracing=False) -> None:
"""
Args:
enable_df_api_tracing - if set, any returned DataPipe would accept
DataFrames API in tracing mode.
"""
self.name = name
self.enable_df_api_tracing = enable_df_api_tracing
def __call__(self, cls):
if issubclass(cls, IterDataPipe):
if isinstance(cls, Type): # type: ignore[arg-type]
if not isinstance(cls, _DataPipeMeta):
raise TypeError('`functional_datapipe` can only decorate IterDataPipe')
# with non_deterministic decorator
else:
if not isinstance(cls, non_deterministic) and \
not (hasattr(cls, '__self__') and
isinstance(cls.__self__, non_deterministic)):
raise TypeError('`functional_datapipe` can only decorate IterDataPipe')
IterDataPipe.register_datapipe_as_function(self.name, cls, enable_df_api_tracing=self.enable_df_api_tracing)
elif issubclass(cls, MapDataPipe):
MapDataPipe.register_datapipe_as_function(self.name, cls)
return cls
######################################################
# Determinism
######################################################
_determinism: bool = False
class guaranteed_datapipes_determinism(object):
prev: bool
def __init__(self) -> None:
global _determinism
self.prev = _determinism
_determinism = True
def __enter__(self) -> None:
pass
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
global _determinism
_determinism = self.prev
class non_deterministic(object):
cls: Optional[Type[IterDataPipe]] = None
# TODO: Lambda for picking
deterministic_fn: Callable[[], bool]
def __init__(self, arg: Union[Type[IterDataPipe], Callable[[], bool]]) -> None:
# 1. Decorator doesn't have any argument
if isinstance(arg, Type): # type: ignore[arg-type]
if not issubclass(arg, IterDataPipe): # type: ignore[arg-type]
raise TypeError("Only `IterDataPipe` can be decorated with `non_deterministic`"
", but {} is found".format(arg.__name__))
self.cls = arg # type: ignore[assignment]
# 2. Decorator has an argument of a function
# This class should behave differently given different inputs. Use this
# function to verify the determinism for each instance.
# When the function returns True, the instance is non-deterministic. Otherwise,
# the instance is a deterministic DataPipe.
elif isinstance(arg, Callable): # type:ignore[arg-type]
self.deterministic_fn = arg # type: ignore[assignment, misc]
else:
raise TypeError("{} can not be decorated by non_deterministic".format(arg))
def __call__(self, *args, **kwargs):
global _determinism
# Decorate IterDataPipe
if self.cls is not None:
if _determinism:
raise TypeError("{} is non-deterministic, but you set 'guaranteed_datapipes_determinism'. "
"You can turn off determinism for this DataPipe if that is acceptable "
"for your application".format(self.cls.__name__))
return self.cls(*args, **kwargs) # type: ignore[call-arg]
# Decorate with a functional argument
if not (isinstance(args[0], Type) and # type: ignore[arg-type]
issubclass(args[0], IterDataPipe)):
raise TypeError("Only `IterDataPipe` can be decorated, but {} is found"
.format(args[0].__name__))
self.cls = args[0]
return self.deterministic_wrapper_fn
def deterministic_wrapper_fn(self, *args, **kwargs) -> IterDataPipe:
res = self.deterministic_fn(*args, **kwargs) # type: ignore[call-arg, misc]
if not isinstance(res, bool):
raise TypeError("deterministic_fn of `non_deterministic` decorator is required "
"to return a boolean value, but {} is found".format(type(res)))
global _determinism
if _determinism and res:
raise TypeError("{} is non-deterministic with the inputs, but you set "
"'guaranteed_datapipes_determinism'. You can turn off determinism "
"for this DataPipe if that is acceptable for your application"
.format(self.cls.__name__)) # type: ignore[union-attr]
return self.cls(*args, **kwargs) # type: ignore[call-arg, misc]
######################################################
# Type validation
######################################################
# Validate each DataPipe argument whose hint is a DataPipe type, requiring the argument's type to be a subtype of the hint.
def argument_validation(f):
signature = inspect.signature(f)
hints = get_type_hints(f)
@wraps(f)
def wrapper(*args, **kwargs):
bound = signature.bind(*args, **kwargs)
for argument_name, value in bound.arguments.items():
if argument_name in hints and isinstance(hints[argument_name], _DataPipeMeta):
hint = hints[argument_name]
if not isinstance(value, IterDataPipe):
raise TypeError("Expected argument '{}' as a IterDataPipe, but found {}"
.format(argument_name, type(value)))
if not value.type.issubtype(hint.type):
raise TypeError("Expected type of argument '{}' as a subtype of "
"hint {}, but found {}"
.format(argument_name, hint.type, value.type))
return f(*args, **kwargs)
return wrapper
# Default value is True
_runtime_validation_enabled: bool = True
class runtime_validation_disabled(object):
prev: bool
def __init__(self) -> None:
global _runtime_validation_enabled
self.prev = _runtime_validation_enabled
_runtime_validation_enabled = False
def __enter__(self) -> None:
pass
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
global _runtime_validation_enabled
_runtime_validation_enabled = self.prev
# Runtime checking
# Validate output data is subtype of return hint
def runtime_validation(f):
# TODO:
# Can be extended to validate '__getitem__' and nonblocking
if f.__name__ != '__iter__':
raise TypeError("Can not decorate function {} with 'runtime_validation'"
.format(f.__name__))
@wraps(f)
def wrapper(self):
global _runtime_validation_enabled
if not _runtime_validation_enabled:
yield from f(self)
else:
it = f(self)
for d in it:
if not self.type.issubtype_of_instance(d):
raise RuntimeError("Expected an instance as subtype of {}, but found {}({})"
.format(self.type, d, type(d)))
yield d
return wrapper
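# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch appended for this dump; not part of the
# original file. It registers a custom IterDataPipe under a made-up functional
# name ("times_two") so it can be chained with the fluent API. Assumes
# `IterableWrapper` is importable at the path used elsewhere in this tree.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.utils.data.datapipes.iter import IterableWrapper
    @functional_datapipe("times_two")
    class _TimesTwoIterDataPipe(IterDataPipe):
        def __init__(self, source_datapipe):
            self.source_datapipe = source_datapipe
        def __iter__(self):
            for x in self.source_datapipe:
                yield 2 * x
    dp = IterableWrapper([1, 2, 3]).times_two()  # functional form registered above
    print(list(dp))  # [2, 4, 6]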
|
pytorch-master
|
torch/utils/data/datapipes/_decorator.py
|
# Taking reference from official Python typing
# https://github.com/python/cpython/blob/master/Lib/typing.py
import collections
import functools
import numbers
import sys
from torch.utils.data.datapipes._hook_iterator import hook_iterator, _SnapshotState
from typing import (Any, Dict, Iterator, Generic, List, Set, Tuple, TypeVar, Union,
get_type_hints)
from typing import _eval_type, _tp_cache, _type_check, _type_repr # type: ignore[attr-defined]
from typing import ForwardRef
# TODO: Use TypeAlias when Python 3.6 is deprecated
# Please check [Note: TypeMeta and TypeAlias]
# In case of metaclass conflict due to ABCMeta or _ProtocolMeta
# For Python 3.9, only Protocol in typing uses metaclass
from abc import ABCMeta
from typing import _GenericAlias # type: ignore[attr-defined, no-redef]
class GenericMeta(ABCMeta): # type: ignore[no-redef]
pass
class Integer(numbers.Integral):
pass
class Boolean(numbers.Integral):
pass
# Python 'type' object is not subscriptable
# Tuple[int, List, dict] -> valid
# tuple[int, list, dict] -> invalid
# Map Python 'type' to abstract base class
TYPE2ABC = {
bool: Boolean,
int: Integer,
float: numbers.Real,
complex: numbers.Complex,
dict: Dict,
list: List,
set: Set,
tuple: Tuple,
None: type(None),
}
def issubtype(left, right, recursive=True):
r"""
Check if the left-side type is a subtype of the right-side type.
    If either type is a composite type like `Union` or a `TypeVar` with
    bounds, it is expanded into a list of types, and the check verifies that
    every left-side type is a subtype of at least one right-side type.
"""
left = TYPE2ABC.get(left, left)
right = TYPE2ABC.get(right, right)
if right is Any or left == right:
return True
if isinstance(right, _GenericAlias):
if getattr(right, '__origin__', None) is Generic:
return True
if right == type(None):
return False
# Right-side type
constraints = _decompose_type(right)
if len(constraints) == 0 or Any in constraints:
return True
if left is Any:
return False
# Left-side type
variants = _decompose_type(left)
# all() will return True for empty variants
if len(variants) == 0:
return False
return all(_issubtype_with_constraints(variant, constraints, recursive) for variant in variants)
def _decompose_type(t, to_list=True):
if isinstance(t, TypeVar):
if t.__bound__ is not None:
ts = [t.__bound__]
else:
# For T_co, __constraints__ is ()
ts = list(t.__constraints__)
elif hasattr(t, '__origin__') and t.__origin__ == Union:
ts = t.__args__
else:
if not to_list:
return None
ts = [t]
# Ignored: Generator has incompatible item type "object"; expected "Type[Any]"
ts = list(TYPE2ABC.get(_t, _t) for _t in ts) # type: ignore[misc]
return ts
def _issubtype_with_constraints(variant, constraints, recursive=True):
r"""
    Check whether the variant is a subtype of at least one of the constraints.
    Composite types like `Union` and `TypeVar` with bounds are expanded
    for testing.
"""
if variant in constraints:
return True
# [Note: Subtype for Union and TypeVar]
# Python typing is able to flatten Union[Union[...]] or Union[TypeVar].
# But it couldn't flatten the following scenarios:
# - Union[int, TypeVar[Union[...]]]
# - TypeVar[TypeVar[...]]
# So, variant and each constraint may be a TypeVar or a Union.
    # In these cases, all of the inner types from the variant are required to be
    # extracted and verified as a subtype of any constraint. Likewise, all of the
    # inner types from any constraint that is a TypeVar or a Union are
    # also required to be extracted and verified if the variant belongs to
    # any of them.
# Variant
vs = _decompose_type(variant, to_list=False)
# Variant is TypeVar or Union
if vs is not None:
return all(_issubtype_with_constraints(v, constraints, recursive) for v in vs)
# Variant is not TypeVar or Union
if hasattr(variant, '__origin__') and variant.__origin__ is not None:
v_origin = variant.__origin__
# In Python-3.9 typing library untyped generics do not have args
v_args = getattr(variant, "__args__", None)
else:
v_origin = variant
v_args = None
# Constraints
for constraint in constraints:
cs = _decompose_type(constraint, to_list=False)
# Constraint is TypeVar or Union
if cs is not None:
if _issubtype_with_constraints(variant, cs, recursive):
return True
# Constraint is not TypeVar or Union
else:
# __origin__ can be None for plain list, tuple, ... in Python 3.6
if hasattr(constraint, '__origin__') and constraint.__origin__ is not None:
c_origin = constraint.__origin__
if v_origin == c_origin:
if not recursive:
return True
# In Python-3.9 typing library untyped generics do not have args
c_args = getattr(constraint, "__args__", None)
if c_args is None or len(c_args) == 0:
return True
if v_args is not None and len(v_args) == len(c_args) and \
all(issubtype(v_arg, c_arg) for v_arg, c_arg in zip(v_args, c_args)):
return True
# Tuple[int] -> Tuple
else:
if v_origin == constraint:
return True
return False
def issubinstance(data, data_type):
if not issubtype(type(data), data_type, recursive=False):
return False
# In Python-3.9 typing library __args__ attribute is not defined for untyped generics
dt_args = getattr(data_type, "__args__", None)
if isinstance(data, tuple):
if dt_args is None or len(dt_args) == 0:
return True
if len(dt_args) != len(data):
return False
return all(issubinstance(d, t) for d, t in zip(data, dt_args))
elif isinstance(data, (list, set)):
if dt_args is None or len(dt_args) == 0:
return True
t = dt_args[0]
return all(issubinstance(d, t) for d in data)
elif isinstance(data, dict):
if dt_args is None or len(dt_args) == 0:
return True
kt, vt = dt_args
return all(issubinstance(k, kt) and issubinstance(v, vt) for k, v in data.items())
return True
# [Note: TypeMeta and TypeAlias]
# In order to keep compatibility for Python 3.6, use Meta for the typing.
# TODO: When PyTorch drops the support for Python 3.6, it can be converted
# into the Alias system and using `__class_getitem__` for DataPipe. The
# typing system will gain benefit of performance and resolving metaclass
# conflicts as elaborated in https://www.python.org/dev/peps/pep-0560/
class _DataPipeType:
r"""
Save type annotation in `param`
"""
def __init__(self, param):
self.param = param
def __repr__(self):
return _type_repr(self.param)
def __eq__(self, other):
if isinstance(other, _DataPipeType):
return self.param == other.param
return NotImplemented
def __hash__(self):
return hash(self.param)
def issubtype(self, other):
if isinstance(other.param, _GenericAlias):
if getattr(other.param, '__origin__', None) is Generic:
return True
if isinstance(other, _DataPipeType):
return issubtype(self.param, other.param)
if isinstance(other, type):
return issubtype(self.param, other)
raise TypeError("Expected '_DataPipeType' or 'type', but found {}".format(type(other)))
def issubtype_of_instance(self, other):
return issubinstance(other, self.param)
# Default type for DataPipe without annotation
T_co = TypeVar('T_co', covariant=True)
_DEFAULT_TYPE = _DataPipeType(Generic[T_co])
class _DataPipeMeta(GenericMeta):
r"""
Metaclass for `DataPipe`. Add `type` attribute and `__init_subclass__` based
on the type, and validate the return hint of `__iter__`.
    Note that there is a subclass `_IterDataPipeMeta` specifically for `IterDataPipe`.
"""
type: _DataPipeType
def __new__(cls, name, bases, namespace, **kwargs):
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
# TODO: the statements below are not reachable by design as there is a bug and typing is low priority for now.
cls.__origin__ = None
if 'type' in namespace:
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
namespace['__type_class__'] = False
# For plain derived class without annotation
for base in bases:
if isinstance(base, _DataPipeMeta):
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
namespace.update({'type': _DEFAULT_TYPE,
'__init_subclass__': _dp_init_subclass})
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
def __init__(self, name, bases, namespace, **kwargs):
super().__init__(name, bases, namespace, **kwargs) # type: ignore[call-overload]
# TODO: Fix isinstance bug
@_tp_cache
def _getitem_(self, params):
if params is None:
raise TypeError('{}[t]: t can not be None'.format(self.__name__))
if isinstance(params, str):
params = ForwardRef(params)
if not isinstance(params, tuple):
params = (params, )
msg = "{}[t]: t must be a type".format(self.__name__)
params = tuple(_type_check(p, msg) for p in params)
if isinstance(self.type.param, _GenericAlias):
orig = getattr(self.type.param, '__origin__', None)
if isinstance(orig, type) and orig is not Generic:
p = self.type.param[params] # type: ignore[index]
t = _DataPipeType(p)
l = len(str(self.type)) + 2
name = self.__name__[:-l]
name = name + '[' + str(t) + ']'
bases = (self,) + self.__bases__
return self.__class__(name, bases,
{'__init_subclass__': _dp_init_subclass,
'type': t,
'__type_class__': True})
if len(params) > 1:
raise TypeError('Too many parameters for {} actual {}, expected 1'.format(self, len(params)))
t = _DataPipeType(params[0])
if not t.issubtype(self.type):
raise TypeError('Can not subclass a DataPipe[{}] from DataPipe[{}]'
.format(t, self.type))
# Types are equal, fast path for inheritance
if self.type == t:
return self
name = self.__name__ + '[' + str(t) + ']'
bases = (self,) + self.__bases__
return self.__class__(name, bases,
{'__init_subclass__': _dp_init_subclass,
'__type_class__': True,
'type': t})
# TODO: Fix isinstance bug
def _eq_(self, other):
if not isinstance(other, _DataPipeMeta):
return NotImplemented
if self.__origin__ is None or other.__origin__ is None: # type: ignore[has-type]
return self is other
return (self.__origin__ == other.__origin__ # type: ignore[has-type]
and self.type == other.type)
# TODO: Fix isinstance bug
def _hash_(self):
return hash((self.__name__, self.type))
class _IterDataPipeMeta(_DataPipeMeta):
r"""
    Metaclass for `IterDataPipe`, inheriting from `_DataPipeMeta`. Adds various functions for behaviors
specific to `IterDataPipe`.
"""
def __new__(cls, name, bases, namespace, **kwargs):
if 'reset' in namespace:
reset_func = namespace['reset']
@functools.wraps(reset_func)
def conditional_reset(*args, **kwargs):
r"""
Only execute DataPipe's `reset()` method if `_SnapshotState` is `Iterating`. This allows recently
restored DataPipe to preserve its restored state during the initial `__iter__` call.
"""
datapipe = args[0]
if datapipe._snapshot_state == _SnapshotState.Iterating:
# Reset `NotStarted` is necessary because the `source_datapipe` of a DataPipe might have
# already begun iterating.
datapipe._number_of_samples_yielded = 0
datapipe._fast_forward_iterator = None
reset_func(*args, **kwargs)
datapipe._snapshot_state = _SnapshotState.Iterating
namespace['reset'] = conditional_reset
if '__iter__' in namespace:
hook_iterator(namespace, 'enumerate(DataPipe)#{}'.format(name))
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
def _dp_init_subclass(sub_cls, *args, **kwargs):
# Add function for datapipe instance to reinforce the type
sub_cls.reinforce_type = reinforce_type
# TODO:
# - add global switch for type checking at compile-time
# Ignore internal type class
if getattr(sub_cls, '__type_class__', False):
return
# Check if the string type is valid
if isinstance(sub_cls.type.param, ForwardRef):
base_globals = sys.modules[sub_cls.__module__].__dict__
try:
param = _eval_type(sub_cls.type.param, base_globals, locals())
sub_cls.type.param = param
except TypeError as e:
raise TypeError("{} is not supported by Python typing"
.format(sub_cls.type.param.__forward_arg__)) from e
if '__iter__' in sub_cls.__dict__:
iter_fn = sub_cls.__dict__['__iter__']
hints = get_type_hints(iter_fn)
if 'return' in hints:
return_hint = hints['return']
# Plain Return Hint for Python 3.6
if return_hint == Iterator:
return
if not (hasattr(return_hint, '__origin__') and
(return_hint.__origin__ == Iterator or
return_hint.__origin__ == collections.abc.Iterator)):
raise TypeError("Expected 'Iterator' as the return annotation for `__iter__` of {}"
", but found {}".format(sub_cls.__name__, _type_repr(hints['return'])))
data_type = return_hint.__args__[0]
if not issubtype(data_type, sub_cls.type.param):
raise TypeError("Expected return type of '__iter__' as a subtype of {}, but found {}"
" for {}".format(sub_cls.type, _type_repr(data_type), sub_cls.__name__))
def reinforce_type(self, expected_type):
r"""
    Reinforce the type for a DataPipe instance. The 'expected_type' is required
    to be a subtype of the original type hint, to restrict the type requirement
    of the DataPipe instance.
"""
if isinstance(expected_type, tuple):
expected_type = Tuple[expected_type]
_type_check(expected_type, msg="'expected_type' must be a type")
if not issubtype(expected_type, self.type.param):
raise TypeError("Expected 'expected_type' as subtype of {}, but found {}"
.format(self.type, _type_repr(expected_type)))
self.type = _DataPipeType(expected_type)
return self
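# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch appended for this dump; not part of the
# original file. It exercises the `issubtype` / `issubinstance` helpers above
# on a few plain typing constructs.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print(issubtype(List[int], List))                   # True: typed generic vs. bare generic
    print(issubtype(int, Union[int, str]))              # True: member of the Union
    print(issubtype(Tuple[int, str], Tuple[int, int]))  # False: second element mismatches
    print(issubinstance((1, "a"), Tuple[int, str]))     # True
    print(issubinstance([1, 2, "x"], List[int]))        # False: "x" is not an int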
|
pytorch-master
|
torch/utils/data/datapipes/_typing.py
|
import inspect
import functools
from enum import Enum
import torch.autograd
class _SnapshotState(Enum):
r"""
These are the snapshotting-related states that IterDataPipes can be in.
`NotStarted` - allows you to restore a snapshot and create an iterator without reset
`Restored` - cannot restore again, allows you to create an iterator without resetting the DataPipe
`Iterating` - can restore, will reset if you create a new iterator
"""
NotStarted = 0
Restored = 1
Iterating = 2
def _simplify_obj_name(obj) -> str:
"""
Simplify the display strings of objects for the purpose of rendering within DataPipe error messages.
"""
if inspect.isfunction(obj):
return obj.__name__
else:
return repr(obj)
def _generate_input_args_string(obj):
"""
Generate a string for the input arguments of an object.
"""
signature = inspect.signature(obj.__class__)
input_param_names = set()
for param_name, _ in signature.parameters.items():
input_param_names.add(param_name)
result = []
for name, obj in inspect.getmembers(obj):
if name in input_param_names:
result.append((name, _simplify_obj_name(obj)))
return ', '.join([f'{name}={value}' for name, value in result])
def _generate_iterdatapipe_msg(datapipe):
return f"{datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
def _gen_invalid_iterdatapipe_msg(datapipe):
return ("This iterator has been invalidated because another iterator has been created "
f"from the same IterDataPipe: {_generate_iterdatapipe_msg(datapipe)}\n"
"This may be caused multiple references to the same IterDataPipe. We recommend "
"using `.fork()` if that is necessary.")
_feedback_msg = ("\nFor feedback regarding this single iterator per IterDataPipe constraint, feel free "
"to comment on this issue: https://github.com/pytorch/data/issues/45.")
def _check_iterator_valid(datapipe, iterator_id, next_method_exists=False) -> None:
r"""
    Given an instance of a DataPipe and an iterator ID, check if the IDs match, and if not, raise an exception.
In the case of ChildDataPipe, the ID gets compared to the one stored in `main_datapipe` as well.
"""
if next_method_exists:
# This is the case where `IterDataPipe` has both `__iter__` and `__next__`.
# The `_valid_iterator_id` should either be never set (`None`), or set by at most one
# iterator (`0`). Otherwise, it means there are multiple iterators.
if datapipe._valid_iterator_id is not None and datapipe._valid_iterator_id != 0:
extra_msg = "\nNote that this exception is raised inside your IterDataPipe's a `__next__` method"
raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + extra_msg + _feedback_msg)
elif hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
if hasattr(datapipe, "_check_valid_iterator_id"):
if not datapipe._check_valid_iterator_id(iterator_id):
raise RuntimeError("This iterator has been invalidated, because a new iterator has been created "
f"from one of the ChildDataPipes of "
f"{_generate_iterdatapipe_msg(datapipe.main_datapipe)}." + _feedback_msg)
else:
raise RuntimeError("ChildDataPipe must have method `_check_valid_iterator_id`.")
elif datapipe._valid_iterator_id != iterator_id:
raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + _feedback_msg)
def _set_datapipe_valid_iterator_id(datapipe):
r"""
    Given a DataPipe, updates its valid iterator ID and resets the DataPipe.
"""
if hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
if hasattr(datapipe, "_set_main_datapipe_valid_iterator_id"):
datapipe._set_main_datapipe_valid_iterator_id() # reset() is called within this method when appropriate
else:
raise RuntimeError("ChildDataPipe must have method `_set_main_datapipe_valid_iterator_id`.")
else:
if datapipe._valid_iterator_id is None:
datapipe._valid_iterator_id = 0
else:
datapipe._valid_iterator_id += 1
datapipe.reset()
return datapipe._valid_iterator_id
def hook_iterator(namespace, profile_name):
r"""
    Hook that is applied to all `__iter__` methods of classes with metaclass `_DataPipeMeta`. This is done for the purpose of
profiling and checking if an iterator is still valid.
"""
def profiler_record_fn_context():
return torch.autograd.profiler.record_function(profile_name)
class IteratorDecorator:
r"""
        Wrap the iterator and modify its `__next__` method. This decorator is applied to
        DataPipes whose `__iter__` method is NOT a generator function. Such an `__iter__`
        method commonly returns `self`, but not necessarily.
"""
def __init__(self, iterator, source_dp, iterator_id, has_next_method):
self.iterator = iterator
self.source_dp = source_dp
self.iterator_id = iterator_id
self._profiler_enabled = torch.autograd._profiler_enabled()
# Check if `__iter__` returns `self` and `DataPipe` has `__next__`
self.self_and_has_next_method = self.iterator is self.source_dp and has_next_method
def __iter__(self):
return self
def _get_next(self):
r"""
Return next with logic related to iterator validity, profiler, and incrementation of samples yielded.
"""
_check_iterator_valid(self.source_dp, self.iterator_id)
result = next(self.iterator)
if not self.self_and_has_next_method:
self.source_dp._number_of_samples_yielded += 1
return result
def __next__(self):
# TODO: Add try-except to in-place reduce traceback from the Exception
# See: https://github.com/pytorch/data/issues/284
if self._profiler_enabled:
with profiler_record_fn_context():
return self._get_next()
else: # Decided against using `contextlib.nullcontext` for performance reasons
return self._get_next()
def __getattr__(self, name):
return getattr(self.iterator, name)
func = namespace['__iter__']
# ``__iter__`` of IterDataPipe is a generator function
if inspect.isgeneratorfunction(func):
@functools.wraps(func)
def wrap_generator(*args, **kwargs):
gen = func(*args, **kwargs)
datapipe = args[0]
if datapipe._fast_forward_iterator:
it = datapipe._fast_forward_iterator
datapipe._fast_forward_iterator = None
datapipe._snapshot_state = _SnapshotState.Iterating
while True:
try:
yield next(it)
except StopIteration:
return
iterator_id = _set_datapipe_valid_iterator_id(datapipe) # This ID is tied to each created iterator
_profiler_enabled = torch.autograd._profiler_enabled()
try:
if _profiler_enabled:
with profiler_record_fn_context():
response = gen.send(None)
else:
response = gen.send(None)
while True:
datapipe._number_of_samples_yielded += 1
request = yield response
# Pass through here every time `__next__` is called
if _profiler_enabled:
with profiler_record_fn_context():
_check_iterator_valid(datapipe, iterator_id)
response = gen.send(request)
else: # Decided against using `contextlib.nullcontext` for performance reasons
_check_iterator_valid(datapipe, iterator_id)
response = gen.send(request)
except StopIteration as e:
return
except Exception as e:
# TODO: Simplify the traceback message to skip over `response = gen.send(None)`
# Part of https://github.com/pytorch/data/issues/284
datapipe = args[0]
msg = "thrown by __iter__ of"
single_iterator_msg = "single iterator per IterDataPipe constraint"
if hasattr(e.args, '__len__'):
full_msg = f"{msg} {datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
if len(e.args) == 0: # If an exception message doesn't exist
e.args = (f'\nThis exception is {full_msg}',)
elif msg not in e.args[0] and single_iterator_msg not in e.args[0]:
e.args = (e.args[0] + f'\nThis exception is {full_msg}',) + e.args[1:]
raise
namespace['__iter__'] = wrap_generator
else: # ``__iter__`` of IterDataPipe is NOT a generator function
# IterDataPipe is an iterator with both ``__iter__`` and ``__next__``
# And ``__iter__`` may or may not return `self`
if '__next__' in namespace: # If `__next__` exists, put a wrapper around it
next_func = namespace['__next__']
@functools.wraps(next_func)
def wrap_next(*args, **kwargs):
if torch.autograd._profiler_enabled():
with profiler_record_fn_context():
result = next_func(*args, **kwargs)
else:
result = next_func(*args, **kwargs)
datapipe = args[0]
datapipe._number_of_samples_yielded += 1
return result
namespace['__next__'] = wrap_next
        # Note that if `__next__` and `__iter__` do something completely unrelated, it may cause issues,
        # but the user would then be violating the iterator protocol. Potential issues:
        # 1. The valid iterator ID may not be updated or checked properly
        # 2. The number of samples yielded will be miscounted
# Regardless if `__next__` exists or not, `__iter__` needs a wrapper to track the number of valid iterators
@functools.wraps(func)
def wrap_iter(*args, **kwargs):
iter_ret = func(*args, **kwargs)
datapipe = args[0]
datapipe._snapshot_state = _SnapshotState.Iterating
if datapipe._fast_forward_iterator:
iter_ret = datapipe._fast_forward_iterator
datapipe._fast_forward_iterator = None
return iter_ret
iterator_id = _set_datapipe_valid_iterator_id(datapipe) # This ID is tied to each created iterator
return IteratorDecorator(iter_ret, datapipe, iterator_id, '__next__' in namespace)
namespace['__iter__'] = wrap_iter
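# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch appended for this dump; not part of the
# original file. It shows the effect of the hook installed above: creating a
# second iterator over the same IterDataPipe invalidates the first one.
# Assumes `IterableWrapper` is importable at the path used elsewhere in this
# source tree.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.utils.data.datapipes.iter import IterableWrapper
    dp = IterableWrapper(range(3))
    it1 = iter(dp)
    print(next(it1))  # 0
    it2 = iter(dp)    # invalidates `it1`
    try:
        next(it1)
    except RuntimeError as exc:
        print("it1 was invalidated:", "invalidated" in str(exc))  # True
    print(list(it2))  # [0, 1, 2]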
|
pytorch-master
|
torch/utils/data/datapipes/_hook_iterator.py
|
import functools
import pickle
from typing import Dict, Callable, Optional, TypeVar, Generic, Iterator
from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta
from torch.utils.data.datapipes._hook_iterator import _SnapshotState
from torch.utils.data.datapipes.utils.common import (
_deprecation_warning,
_iter_deprecated_functional_names,
_map_deprecated_functional_names,
)
from torch.utils.data.dataset import Dataset, IterableDataset
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
__all__ = [
"DataChunk",
"DFIterDataPipe",
"IterDataPipe",
"MapDataPipe",
]
T = TypeVar('T')
T_co = TypeVar('T_co', covariant=True)
UNTRACABLE_DATAFRAME_PIPES = ['batch', # As it returns DataChunks
'groupby', # As it returns DataChunks
'_dataframes_as_tuples', # As it unpacks DF
'trace_as_dataframe', # As it used to mark DF for tracing
]
class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta):
r"""
Iterable-style DataPipe.
All DataPipes that represent an iterable of data samples should subclass this.
This style of DataPipes is particularly useful when data come from a stream, or
when the number of samples is too large to fit them all in memory. ``IterDataPipe`` is lazily initialized and its
elements are computed only when ``next()`` is called on the iterator of an ``IterDataPipe``.
All subclasses should overwrite :meth:`__iter__`, which would return an
iterator of samples in this DataPipe. Calling ``__iter__`` of an ``IterDataPipe`` automatically invokes its
method ``reset()``, which by default performs no operation. When writing a custom ``IterDataPipe``, users should
override ``reset()`` if necessary. The common usages include resetting buffers, pointers,
and various state variables within the custom ``IterDataPipe``.
Note:
Only `one` iterator can be valid for each ``IterDataPipe`` at a time,
and the creation of a second iterator will invalidate the first one. This constraint is necessary because
some ``IterDataPipe`` have internal buffers, whose states can become invalid if there are multiple iterators.
The code example below presents details on how this constraint looks in practice.
If you have any feedback related to this constraint, please see `GitHub IterDataPipe Single Iterator Issue`_.
These DataPipes can be invoked in two ways, using the class constructor or applying their
functional form onto an existing ``IterDataPipe`` (recommended, available to most but not all DataPipes).
You can chain multiple `IterDataPipe` together to form a pipeline that will perform multiple
operations in succession.
.. _GitHub IterDataPipe Single Iterator Issue:
https://github.com/pytorch/data/issues/45
Note:
When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
item in the DataPipe will be yielded from the :class:`~torch.utils.data.DataLoader`
iterator. When :attr:`num_workers > 0`, each worker process will have a
different copy of the DataPipe object, so it is often desired to configure
each copy independently to avoid having duplicate data returned from the
workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
process, returns information about the worker. It can be used in either the
dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
:attr:`worker_init_fn` option to modify each copy's behavior.
Examples:
General Usage:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper, Mapper
>>> dp = IterableWrapper(range(10))
>>> map_dp_1 = Mapper(dp, lambda x: x + 1) # Using class constructor
>>> map_dp_2 = dp.map(lambda x: x + 1) # Using functional form (recommended)
>>> list(map_dp_1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> list(map_dp_2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> filter_dp = map_dp_1.filter(lambda x: x % 2 == 0)
>>> list(filter_dp)
[2, 4, 6, 8, 10]
Single Iterator Constraint Example:
>>> from torchdata.datapipes.iter import IterableWrapper, Mapper
>>> source_dp = IterableWrapper(range(10))
>>> it1 = iter(source_dp)
>>> list(it1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> it1 = iter(source_dp)
>>> it2 = iter(source_dp) # The creation of a new iterator invalidates `it1`
>>> next(it2)
0
>>> next(it1) # Further usage of `it1` will raise a `RuntimeError`
"""
functions: Dict[str, Callable] = {}
reduce_ex_hook: Optional[Callable] = None
getstate_hook: Optional[Callable] = None
str_hook: Optional[Callable] = None
repr_hook: Optional[Callable] = None
_valid_iterator_id: Optional[int] = None
_number_of_samples_yielded: int = 0
_snapshot_state: _SnapshotState = _SnapshotState.NotStarted
_fast_forward_iterator: Optional[Iterator] = None
def __getattr__(self, attribute_name):
if attribute_name in IterDataPipe.functions:
if attribute_name in _iter_deprecated_functional_names:
kwargs = _iter_deprecated_functional_names[attribute_name]
_deprecation_warning(**kwargs)
function = functools.partial(IterDataPipe.functions[attribute_name], self)
return function
else:
raise AttributeError("'{0}' object has no attribute '{1}".format(self.__class__.__name__, attribute_name))
@classmethod
def register_function(cls, function_name, function):
cls.functions[function_name] = function
@classmethod
def register_datapipe_as_function(cls, function_name, cls_to_register, enable_df_api_tracing=False):
if function_name in cls.functions:
raise Exception("Unable to add DataPipe function name {} as it is already taken".format(function_name))
def class_function(cls, enable_df_api_tracing, source_dp, *args, **kwargs):
result_pipe = cls(source_dp, *args, **kwargs)
if isinstance(result_pipe, IterDataPipe):
if enable_df_api_tracing or isinstance(source_dp, DFIterDataPipe):
if function_name not in UNTRACABLE_DATAFRAME_PIPES:
result_pipe = result_pipe.trace_as_dataframe()
return result_pipe
function = functools.partial(class_function, cls_to_register, enable_df_api_tracing)
cls.functions[function_name] = function
def __getstate__(self):
"""
This contains special logic to serialize `lambda` functions when `dill` is available.
If this doesn't cover your custom DataPipe's use case, consider writing custom methods for
`__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization.
"""
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(self)
return self.__dict__
def __reduce_ex__(self, *args, **kwargs):
if IterDataPipe.reduce_ex_hook is not None:
try:
return IterDataPipe.reduce_ex_hook(self)
except NotImplementedError:
pass
return super().__reduce_ex__(*args, **kwargs)
@classmethod
def set_getstate_hook(cls, hook_fn):
if IterDataPipe.getstate_hook is not None and hook_fn is not None:
raise Exception("Attempt to override existing getstate_hook")
IterDataPipe.getstate_hook = hook_fn
@classmethod
def set_reduce_ex_hook(cls, hook_fn):
if IterDataPipe.reduce_ex_hook is not None and hook_fn is not None:
raise Exception("Attempt to override existing reduce_ex_hook")
IterDataPipe.reduce_ex_hook = hook_fn
def __repr__(self):
if self.repr_hook is not None:
return self.repr_hook(self)
# Instead of showing <torch. ... .MapperIterDataPipe object at 0x.....>, return the class name
return str(self.__class__.__qualname__)
def __str__(self):
if self.str_hook is not None:
return self.str_hook(self)
# Instead of showing <torch. ... .MapperIterDataPipe object at 0x.....>, return the class name
return str(self.__class__.__qualname__)
def reset(self) -> None:
r"""
Reset the `IterDataPipe` to the initial state. By default, no-op. For subclasses of `IterDataPipe`,
depending on their functionalities, they may want to override this method with implementations that
may clear the buffers and reset pointers of the DataPipe.
The `reset` method is always called when `__iter__` is called as part of `hook_iterator`.
"""
pass
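# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a hypothetical custom
# ``IterDataPipe`` that overrides ``reset()`` to clear an internal buffer, as
# the docstring above suggests. The class name and the buffering logic are made
# up purely for demonstration.
class _ExampleBufferedIterDataPipe(IterDataPipe):
    def __init__(self, source_datapipe):
        self.source_datapipe = source_datapipe
        self._buffer = []

    def __iter__(self):
        for item in self.source_datapipe:
            self._buffer.append(item)  # remember what has been yielded so far
            yield item

    def reset(self) -> None:
        # Called automatically whenever a new iterator over this DataPipe is created.
        self._buffer = []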
class DFIterDataPipe(IterDataPipe):
def _is_dfpipe(self):
return True
class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta):
r"""
Map-style DataPipe.
All datasets that represent a map from keys to data samples should subclass this.
Subclasses should overwrite :meth:`__getitem__`, supporting fetching a
data sample for a given, unique key. Subclasses can also optionally overwrite
:meth:`__len__`, which is expected to return the size of the dataset by many
:class:`~torch.utils.data.Sampler` implementations and the default options
of :class:`~torch.utils.data.DataLoader`.
These DataPipes can be invoked in two ways, using the class constructor or applying their
functional form onto an existing `MapDataPipe` (recommended, available to most but not all DataPipes).
Note:
:class:`~torch.utils.data.DataLoader` by default constructs an index
sampler that yields integral indices. To make it work with a map-style
DataPipe with non-integral indices/keys, a custom sampler must be provided.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper, Mapper
>>> dp = SequenceWrapper(range(10))
>>> map_dp_1 = dp.map(lambda x: x + 1) # Using functional form (recommended)
>>> list(map_dp_1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> map_dp_2 = Mapper(dp, lambda x: x + 1) # Using class constructor
>>> list(map_dp_2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> batch_dp = map_dp_1.batch(batch_size=2)
>>> list(batch_dp)
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
"""
functions: Dict[str, Callable] = {}
reduce_ex_hook: Optional[Callable] = None
getstate_hook: Optional[Callable] = None
str_hook: Optional[Callable] = None
repr_hook: Optional[Callable] = None
def __getattr__(self, attribute_name):
if attribute_name in MapDataPipe.functions:
if attribute_name in _map_deprecated_functional_names:
kwargs = _map_deprecated_functional_names[attribute_name]
_deprecation_warning(**kwargs)
function = functools.partial(MapDataPipe.functions[attribute_name], self)
return function
else:
raise AttributeError("'{0}' object has no attribute '{1}".format(self.__class__.__name__, attribute_name))
@classmethod
def register_function(cls, function_name, function):
cls.functions[function_name] = function
@classmethod
def register_datapipe_as_function(cls, function_name, cls_to_register):
if function_name in cls.functions:
raise Exception("Unable to add DataPipe function name {} as it is already taken".format(function_name))
def class_function(cls, source_dp, *args, **kwargs):
result_pipe = cls(source_dp, *args, **kwargs)
return result_pipe
function = functools.partial(class_function, cls_to_register)
cls.functions[function_name] = function
def __getstate__(self):
"""
This contains special logic to serialize `lambda` functions when `dill` is available.
If this doesn't cover your custom DataPipe's use case, consider writing custom methods for
`__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization.
"""
if MapDataPipe.getstate_hook is not None:
return MapDataPipe.getstate_hook(self)
return self.__dict__
def __reduce_ex__(self, *args, **kwargs):
if MapDataPipe.reduce_ex_hook is not None:
try:
return MapDataPipe.reduce_ex_hook(self)
except NotImplementedError:
pass
return super().__reduce_ex__(*args, **kwargs)
@classmethod
def set_getstate_hook(cls, hook_fn):
if MapDataPipe.getstate_hook is not None and hook_fn is not None:
raise Exception("Attempt to override existing getstate_hook")
MapDataPipe.getstate_hook = hook_fn
@classmethod
def set_reduce_ex_hook(cls, hook_fn):
if MapDataPipe.reduce_ex_hook is not None and hook_fn is not None:
raise Exception("Attempt to override existing reduce_ex_hook")
MapDataPipe.reduce_ex_hook = hook_fn
def __repr__(self):
if self.repr_hook is not None:
return self.repr_hook(self)
# Instead of showing <torch. ... .MapperMapDataPipe object at 0x.....>, return the class name
return str(self.__class__.__qualname__)
def __str__(self):
if self.str_hook is not None:
return self.str_hook(self)
# Instead of showing <torch. ... .MapperMapDataPipe object at 0x.....>, return the class name
return str(self.__class__.__qualname__)
class _DataPipeSerializationWrapper:
def __init__(self, datapipe):
self._datapipe = datapipe
def __getstate__(self):
use_dill = False
try:
value = pickle.dumps(self._datapipe)
except Exception:
if HAS_DILL:
value = dill.dumps(self._datapipe)
use_dill = True
else:
raise
return (value, use_dill)
def __setstate__(self, state):
value, use_dill = state
if use_dill:
self._datapipe = dill.loads(value)
else:
self._datapipe = pickle.loads(value)
def __len__(self):
try:
return len(self._datapipe)
except Exception:
raise TypeError(
"{} instance doesn't have valid length".format(type(self).__name__)
)
class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe):
def __iter__(self):
yield from self._datapipe
class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe):
def __getitem__(self, idx):
return self._datapipe[idx]
class DataChunk(list, Generic[T]):
def __init__(self, items):
super().__init__(items)
self.items = items
def as_str(self, indent=''):
res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]"
return res
def __iter__(self) -> Iterator[T]:
for i in super().__iter__():
yield i
def raw_iterator(self) -> T: # type: ignore[misc]
for i in self.items:
yield i
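# Small usage sketch for ``DataChunk`` (illustrative only; guarded so it never
# runs on import). ``DataChunk`` behaves like a list, but additionally exposes
# ``raw_iterator`` over the wrapped items and ``as_str`` for pretty-printing.
if __name__ == "__main__":
    _chunk = DataChunk([1, 2, 3])
    assert list(_chunk) == [1, 2, 3]
    assert list(_chunk.raw_iterator()) == [1, 2, 3]
    assert _chunk.as_str(indent="  ") == "  [1, 2, 3]"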
|
pytorch-master
|
torch/utils/data/datapipes/datapipe.py
|
from . import iter
from . import map
from . import dataframe
|
pytorch-master
|
torch/utils/data/datapipes/__init__.py
|
import os
import pathlib
from typing import Any, Dict, List, Set, Tuple, Union
def materialize_lines(lines: List[str], indentation: int) -> str:
output = ""
new_line_with_indent = "\n" + " " * indentation
for i, line in enumerate(lines):
if i != 0:
output += new_line_with_indent
output += line.replace('\n', new_line_with_indent)
return output
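# Example (hypothetical inputs): materialize_lines(["def f(): ...", "def g(): ..."], 4)
# returns "def f(): ...\n    def g(): ..." -- every line after the first, and any
# embedded newline, is prefixed with a newline plus 4 spaces so the block can be
# dropped into the template at the right indentation.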
def gen_from_template(dir: str, template_name: str, output_name: str, replacements: List[Tuple[str, Any, int]]):
template_path = os.path.join(dir, template_name)
output_path = os.path.join(dir, output_name)
with open(template_path, "r") as f:
content = f.read()
for placeholder, lines, indentation in replacements:
with open(output_path, "w") as f:
content = content.replace(placeholder, materialize_lines(lines, indentation))
f.write(content)
def find_file_paths(dir_paths: List[str], files_to_exclude: Set[str]) -> Set[str]:
"""
When given a path to a directory, returns the paths to the relevant files within it.
This function does NOT recursively traverse subdirectories.
"""
paths: Set[str] = set()
for dir_path in dir_paths:
all_files = os.listdir(dir_path)
python_files = {fname for fname in all_files if ".py" == fname[-3:]}
filter_files = {fname for fname in python_files if fname not in files_to_exclude}
paths.update({os.path.join(dir_path, fname) for fname in filter_files})
return paths
def extract_method_name(line: str) -> str:
"""
Extracts method name from decorator in the form of "@functional_datapipe({method_name})"
"""
if "(\"" in line:
start_token, end_token = "(\"", "\")"
elif "(\'" in line:
start_token, end_token = "(\'", "\')"
else:
raise RuntimeError(f"Unable to find appropriate method name within line:\n{line}")
start, end = line.find(start_token) + len(start_token), line.find(end_token)
return line[start:end]
def extract_class_name(line: str) -> str:
"""
Extracts class name from class definition in the form of "class {CLASS_NAME}({Type}):"
"""
start_token = "class "
end_token = "("
start, end = line.find(start_token) + len(start_token), line.find(end_token)
return line[start:end]
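# Examples (hypothetical lines, for illustration):
#   extract_method_name('@functional_datapipe("shuffle")')           -> 'shuffle'
#   extract_class_name("class ShufflerIterDataPipe(IterDataPipe):")  -> 'ShufflerIterDataPipe'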
def parse_datapipe_file(file_path: str) -> Tuple[Dict[str, str], Dict[str, str], Set[str]]:
"""
Given a path to file, parses the file and returns a dictionary of method names to function signatures.
"""
method_to_signature, method_to_class_name, special_output_type = {}, {}, set()
with open(file_path) as f:
open_paren_count = 0
method_name, class_name, signature = "", "", ""
skip = False
for line in f.readlines():
if line.count("\"\"\"") % 2 == 1:
skip = not skip
if skip or "\"\"\"" in line: # Skipping comment/example blocks
continue
if "@functional_datapipe" in line:
method_name = extract_method_name(line)
continue
if method_name and "class " in line:
class_name = extract_class_name(line)
continue
if method_name and ("def __init__(" in line or "def __new__(" in line):
if "def __new__(" in line:
special_output_type.add(method_name)
open_paren_count += 1
start = line.find("(") + len("(")
line = line[start:]
if open_paren_count > 0:
open_paren_count += line.count('(')
open_paren_count -= line.count(')')
if open_paren_count == 0:
end = line.rfind(')')
signature += line[:end]
method_to_signature[method_name] = process_signature(signature)
method_to_class_name[method_name] = class_name
method_name, class_name, signature = "", "", ""
elif open_paren_count < 0:
raise RuntimeError("open parenthesis count < 0. This shouldn't be possible.")
else:
signature += line.strip('\n').strip(' ')
return method_to_signature, method_to_class_name, special_output_type
def parse_datapipe_files(file_paths: Set[str]) -> Tuple[Dict[str, str], Dict[str, str], Set[str]]:
methods_and_signatures, methods_and_class_names, methods_with_special_output_types = {}, {}, set()
for path in file_paths:
method_to_signature, method_to_class_name, methods_needing_special_output_types = parse_datapipe_file(path)
methods_and_signatures.update(method_to_signature)
methods_and_class_names.update(method_to_class_name)
methods_with_special_output_types.update(methods_needing_special_output_types)
return methods_and_signatures, methods_and_class_names, methods_with_special_output_types
def split_outside_bracket(line: str, delimiter: str = ",") -> List[str]:
"""
Given a line of text, split it on the delimiter (a comma by default) unless the delimiter appears within brackets '[]'.
"""
bracket_count = 0
curr_token = ""
res = []
for char in line:
if char == "[":
bracket_count += 1
elif char == "]":
bracket_count -= 1
elif char == delimiter and bracket_count == 0:
res.append(curr_token)
curr_token = ""
continue
curr_token += char
res.append(curr_token)
return res
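# Example (hypothetical line): split_outside_bracket("a, b[x, y], c") returns
# ['a', ' b[x, y]', ' c'] -- the comma inside the brackets is not treated as a delimiter.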
def process_signature(line: str) -> str:
"""
Given a raw function signature, clean it up by removing the self-referential datapipe argument,
default arguments of input functions, newlines, and spaces.
"""
tokens: List[str] = split_outside_bracket(line)
for i, token in enumerate(tokens):
tokens[i] = token.strip(' ')
if token == "cls":
tokens[i] = "self"
elif i > 0 and ("self" == tokens[i - 1]) and (tokens[i][0] != "*"):
# Remove the datapipe after 'self' or 'cls' unless it has '*'
tokens[i] = ""
elif "Callable =" in token: # Remove default argument if it is a function
head, default_arg = token.rsplit("=", 2)
tokens[i] = head.strip(' ') + "= ..."
tokens = [t for t in tokens if t != ""]
line = ', '.join(tokens)
return line
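# Example (hypothetical raw signature, for illustration):
#   process_signature("self, datapipe, fn: Callable = default_fn, batch_size: int = 10")
# returns roughly "self, fn: Callable= ..., batch_size: int = 10": the datapipe argument
# that follows `self` is dropped and the Callable default is replaced with `...`.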
def get_method_definitions(file_path: Union[str, List[str]],
files_to_exclude: Set[str],
deprecated_files: Set[str],
default_output_type: str,
method_to_special_output_type: Dict[str, str],
root: str = "") -> List[str]:
"""
.pyi generation for functional DataPipes Process
# 1. Find files that we want to process (exclude the ones who don't)
# 2. Parse method name and signature
# 3. Remove first argument after self (unless it is "*datapipes"), default args, and spaces
"""
if root == "":
root = str(pathlib.Path(__file__).parent.resolve())
file_path = [file_path] if isinstance(file_path, str) else file_path
file_path = [os.path.join(root, path) for path in file_path]
file_paths = find_file_paths(file_path,
files_to_exclude=files_to_exclude.union(deprecated_files))
methods_and_signatures, methods_and_class_names, methods_w_special_output_types = \
parse_datapipe_files(file_paths)
method_definitions = []
for method_name, arguments in methods_and_signatures.items():
class_name = methods_and_class_names[method_name]
if method_name in methods_w_special_output_types:
output_type = method_to_special_output_type[method_name]
else:
output_type = default_output_type
method_definitions.append(f"# Functional form of '{class_name}'\n"
f"def {method_name}({arguments}) -> {output_type}: ...")
method_definitions.sort(key=lambda s: s.split('\n')[1]) # sorting based on method_name
return method_definitions
# Defined outside of main() so they can be imported by TorchData
iterDP_file_path: str = "iter"
iterDP_files_to_exclude: Set[str] = {"__init__.py", "utils.py"}
iterDP_deprecated_files: Set[str] = set()
iterDP_method_to_special_output_type: Dict[str, str] = {"demux": "List[IterDataPipe]", "fork": "List[IterDataPipe]"}
mapDP_file_path: str = "map"
mapDP_files_to_exclude: Set[str] = {"__init__.py", "utils.py"}
mapDP_deprecated_files: Set[str] = set()
mapDP_method_to_special_output_type: Dict[str, str] = {}
def main() -> None:
"""
# Inject file into template datapipe.pyi.in
TODO: The current implementation of this script only generates interfaces for built-in methods. To generate
interfaces for user-defined DataPipes, consider changing `IterDataPipe.register_datapipe_as_function`.
"""
iter_method_definitions = get_method_definitions(iterDP_file_path, iterDP_files_to_exclude, iterDP_deprecated_files,
"IterDataPipe", iterDP_method_to_special_output_type)
map_method_definitions = get_method_definitions(mapDP_file_path, mapDP_files_to_exclude, mapDP_deprecated_files,
"MapDataPipe", mapDP_method_to_special_output_type)
path = pathlib.Path(__file__).parent.resolve()
replacements = [('${IterDataPipeMethods}', iter_method_definitions, 4),
('${MapDataPipeMethods}', map_method_definitions, 4)]
gen_from_template(dir=str(path),
template_name="datapipe.pyi.in",
output_name="datapipe.pyi",
replacements=replacements)
if __name__ == '__main__':
print("Generating Python interface file 'datapipe.pyi'...")
main()
|
pytorch-master
|
torch/utils/data/datapipes/gen_pyi.py
|
from torch.utils.data.datapipes.dataframe.dataframes import (
CaptureDataFrame, DFIterDataPipe,
)
from torch.utils.data.datapipes.dataframe.datapipes import (
DataFramesAsTuplesPipe,
)
__all__ = ['CaptureDataFrame', 'DFIterDataPipe', 'DataFramesAsTuplesPipe']
# Please keep this list sorted
assert __all__ == sorted(__all__)
|
pytorch-master
|
torch/utils/data/datapipes/dataframe/__init__.py
|
import random
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
__all__ = [
"ConcatDataFramesPipe",
"DataFramesAsTuplesPipe",
"ExampleAggregateAsDataFrames",
"FilterDataFramesPipe",
"PerRowDataFramesPipe",
"ShuffleDataFramesPipe",
]
@functional_datapipe('_dataframes_as_tuples')
class DataFramesAsTuplesPipe(IterDataPipe):
def __init__(self, source_datapipe):
self.source_datapipe = source_datapipe
def __iter__(self):
for df in self.source_datapipe:
# for record in df.to_records(index=False):
for record in df_wrapper.iterate(df):
yield record
@functional_datapipe('_dataframes_per_row', enable_df_api_tracing=True)
class PerRowDataFramesPipe(DFIterDataPipe):
def __init__(self, source_datapipe):
self.source_datapipe = source_datapipe
def __iter__(self):
for df in self.source_datapipe:
# TODO(VitalyFedyunin): Replace with a TorchArrow-only API, as we are dropping pandas in a follow-up
for i in range(len(df)):
yield df[i:i + 1]
@functional_datapipe('_dataframes_concat', enable_df_api_tracing=True)
class ConcatDataFramesPipe(DFIterDataPipe):
def __init__(self, source_datapipe, batch=3):
self.source_datapipe = source_datapipe
self.n_batch = batch
def __iter__(self):
buffer = []
for df in self.source_datapipe:
buffer.append(df)
if len(buffer) == self.n_batch:
yield df_wrapper.concat(buffer)
buffer = []
if len(buffer):
yield df_wrapper.concat(buffer)
@functional_datapipe('_dataframes_shuffle', enable_df_api_tracing=True)
class ShuffleDataFramesPipe(DFIterDataPipe):
def __init__(self, source_datapipe):
self.source_datapipe = source_datapipe
def __iter__(self):
size = None
all_buffer = []
for df in self.source_datapipe:
if size is None:
size = df_wrapper.get_len(df)
for i in range(df_wrapper.get_len(df)):
all_buffer.append(df_wrapper.get_item(df, i))
random.shuffle(all_buffer)
buffer = []
for df in all_buffer:
buffer.append(df)
if len(buffer) == size:
yield df_wrapper.concat(buffer)
buffer = []
if len(buffer):
yield df_wrapper.concat(buffer)
@functional_datapipe('_dataframes_filter', enable_df_api_tracing=True)
class FilterDataFramesPipe(DFIterDataPipe):
def __init__(self, source_datapipe, filter_fn):
self.source_datapipe = source_datapipe
self.filter_fn = filter_fn
def __iter__(self):
size = None
all_buffer = []
filter_res = []
for df in self.source_datapipe:
if size is None:
size = len(df.index)
for i in range(len(df.index)):
all_buffer.append(df[i:i + 1])
filter_res.append(self.filter_fn(df.iloc[i]))
buffer = []
for df, res in zip(all_buffer, filter_res):
if res:
buffer.append(df)
if len(buffer) == size:
yield df_wrapper.concat(buffer)
buffer = []
if len(buffer):
yield df_wrapper.concat(buffer)
@functional_datapipe('_to_dataframes_pipe', enable_df_api_tracing=True)
class ExampleAggregateAsDataFrames(DFIterDataPipe):
def __init__(self, source_datapipe, dataframe_size=10, columns=None):
self.source_datapipe = source_datapipe
self.columns = columns
self.dataframe_size = dataframe_size
def _as_list(self, item):
try:
return list(item)
except Exception: # TODO(VitalyFedyunin): Replace with better iterable exception
return [item]
def __iter__(self):
aggregate = []
for item in self.source_datapipe:
aggregate.append(self._as_list(item))
if len(aggregate) == self.dataframe_size:
yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
aggregate = []
if len(aggregate) > 0:
yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
|
pytorch-master
|
torch/utils/data/datapipes/dataframe/datapipes.py
|
_pandas = None
_WITH_PANDAS = None
def _try_import_pandas() -> bool:
try:
import pandas # type: ignore[import]
global _pandas
_pandas = pandas
return True
except ImportError:
return False
# pandas is used only for prototyping; it will shortly be replaced with TorchArrow
def _with_pandas() -> bool:
global _WITH_PANDAS
if _WITH_PANDAS is None:
_WITH_PANDAS = _try_import_pandas()
return _WITH_PANDAS
class PandasWrapper:
@classmethod
def create_dataframe(cls, data, columns):
if not _with_pandas():
raise Exception("DataFrames prototype requires pandas to function")
return _pandas.DataFrame(data, columns=columns) # type: ignore[union-attr]
@classmethod
def is_dataframe(cls, data):
if not _with_pandas():
return False
return isinstance(data, _pandas.core.frame.DataFrame) # type: ignore[union-attr]
@classmethod
def is_column(cls, data):
if not _with_pandas():
return False
return isinstance(data, _pandas.core.series.Series) # type: ignore[union-attr]
@classmethod
def iterate(cls, data):
if not _with_pandas():
raise Exception("DataFrames prototype requires pandas to function")
for d in data.itertuples(index=False):
yield d
@classmethod
def concat(cls, buffer):
if not _with_pandas():
raise Exception("DataFrames prototype requires pandas to function")
return _pandas.concat(buffer) # type: ignore[union-attr]
@classmethod
def get_item(cls, data, idx):
if not _with_pandas():
raise Exception("DataFrames prototype requires pandas to function")
return data[idx: idx + 1]
@classmethod
def get_len(cls, df):
if not _with_pandas():
raise Exception("DataFrames prototype requires pandas to function")
return len(df.index)
@classmethod
def get_columns(cls, df):
if not _with_pandas():
raise Exception("DataFrames prototype requires pandas to function")
return list(df.columns.values.tolist())
# When you build your own implementation, just override it with dataframe_wrapper.set_df_wrapper(new_wrapper_class)
default_wrapper = PandasWrapper
def get_df_wrapper():
return default_wrapper
def set_df_wrapper(wrapper):
global default_wrapper
default_wrapper = wrapper
def create_dataframe(data, columns=None):
wrapper = get_df_wrapper()
return wrapper.create_dataframe(data, columns)
def is_dataframe(data):
wrapper = get_df_wrapper()
return wrapper.is_dataframe(data)
def get_columns(data):
wrapper = get_df_wrapper()
return wrapper.get_columns(data)
def is_column(data):
wrapper = get_df_wrapper()
return wrapper.is_column(data)
def concat(buffer):
wrapper = get_df_wrapper()
return wrapper.concat(buffer)
def iterate(data):
wrapper = get_df_wrapper()
return wrapper.iterate(data)
def get_item(data, idx):
wrapper = get_df_wrapper()
return wrapper.get_item(data, idx)
def get_len(df):
wrapper = get_df_wrapper()
return wrapper.get_len(df)
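# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical, not part of the module): the comment above
# ``default_wrapper`` notes that a custom backend can be swapped in via
# ``set_df_wrapper``. A minimal stand-in that treats a list of dicts as a
# "dataframe" might look like the class below; only the methods actually
# exercised by callers need real logic.
class _ListOfDictsWrapper:
    @classmethod
    def create_dataframe(cls, data, columns):
        return [dict(zip(columns, row)) for row in data]

    @classmethod
    def is_dataframe(cls, data):
        return isinstance(data, list) and all(isinstance(row, dict) for row in data)

    @classmethod
    def iterate(cls, data):
        yield from data

    @classmethod
    def concat(cls, buffer):
        return [row for frame in buffer for row in frame]

    @classmethod
    def get_len(cls, df):
        return len(df)

    @classmethod
    def get_columns(cls, df):
        return list(df[0].keys()) if df else []

    # is_column / get_item are omitted for brevity; they follow the same pattern.

# Activating the custom backend (not done here, to keep the pandas default):
#     set_df_wrapper(_ListOfDictsWrapper)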
|
pytorch-master
|
torch/utils/data/datapipes/dataframe/dataframe_wrapper.py
|
from torch.utils.data.datapipes.datapipe import DataChunk
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
__all__ = ["DataChunkDF", ]
class DataChunkDF(DataChunk):
"""
DataChunkDF iterates over individual items inside DataFrame containers;
to access the DataFrames themselves, use `raw_iterator`.
"""
def __iter__(self):
for df in self.items:
for record in df_wrapper.iterate(df):
yield record
def __len__(self):
total_len = 0
for df in self.items:
total_len += df_wrapper.get_len(df)
return total_len
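# Small usage sketch (illustrative; requires pandas and uses made-up data; guarded
# so it never runs on import).
if __name__ == "__main__":
    try:
        import pandas as pd
    except ImportError:
        pd = None
    if pd is not None:
        _df = pd.DataFrame({"x": [1, 2, 3]})
        _chunk = DataChunkDF([_df])
        assert len(_chunk) == 3                        # rows across all contained frames
        assert len(list(_chunk)) == 3                  # iteration yields individual records
        assert list(_chunk.raw_iterator())[0] is _df   # raw access to the DataFrame itself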
|
pytorch-master
|
torch/utils/data/datapipes/dataframe/structures.py
|
from typing import Any, Dict, List
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe
from torch.utils.data.datapipes.dataframe.structures import DataChunkDF
# TODO(VitalyFedyunin): Add error when two different traces get combined
__all__ = [
"Capture",
"CaptureA",
"CaptureAdd",
"CaptureCall",
"CaptureControl",
"CaptureDataFrame",
"CaptureDataFrameWithDataPipeOps",
"CaptureF",
"CaptureGetAttr",
"CaptureGetItem",
"CaptureInitial",
"CaptureLikeMock",
"CaptureMul",
"CaptureSetItem",
"CaptureSub",
"CaptureVariable",
"CaptureVariableAssign",
"DataFrameTracer",
"DataFrameTracedOps",
"disable_capture",
"get_val",
]
def disable_capture():
CaptureControl.disabled = True
class CaptureControl():
disabled = False
class DataFrameTracedOps(DFIterDataPipe):
def __init__(self, source_datapipe, output_var):
self.source_datapipe = source_datapipe
self.output_var = output_var
def __iter__(self):
for item in self.source_datapipe:
yield self.output_var.apply_ops(item)
# TODO(VitalyFedyunin): Extract this list from the DFIterDataPipe registered functions
DATAPIPES_OPS = ['_dataframes_as_tuples', 'groupby', '_dataframes_filter', 'map', 'to_datapipe',
'shuffle', 'concat', 'batch', '_dataframes_per_row', '_dataframes_concat', '_dataframes_shuffle']
UNIMPLEMENTED_ATTR = ['__deepcopy__', '__setstate__', 'is_shardable', 'apply_sharding']
class Capture(object):
# TODO: All operations are shared across the entire InitialCapture; need to figure out what happens if we join two captures
def __init__(self, schema_df=None):
self.ctx = {'operations': [], 'variables': [], 'schema_df': schema_df}
def __str__(self):
return self._ops_str()
def _ops_str(self):
res = ""
for op in self.ctx['operations']:
if len(res) > 0:
res += "\n"
res += str(op)
return res
def __getstate__(self):
# TODO(VitalyFedyunin): Currently can't pickle (why?)
self.ctx['schema_df'] = None
for var in self.ctx['variables']:
var.calculated_value = None
state = {}
for item in self.__dict__:
state[item] = getattr(self, item)
return state
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __getattr__(self, attrname):
if attrname == 'kwarg' or attrname == 'kwargs':
raise Exception('no kwargs!')
if attrname in ['__deepcopy__']:
raise AttributeError()
result = CaptureGetAttr(self, attrname, ctx=self.ctx)
return result
def __getitem__(self, key):
return CaptureGetItem(self, key, ctx=self.ctx)
def __setitem__(self, key, value):
self.ctx['operations'].append(
CaptureSetItem(self, key, value, ctx=self.ctx))
def __add__(self, add_val):
res = CaptureAdd(self, add_val, ctx=self.ctx)
var = CaptureVariable(res, ctx=self.ctx)
self.ctx['operations'].append(
CaptureVariableAssign(variable=var, value=res, ctx=self.ctx))
return var
def __sub__(self, add_val):
res = CaptureSub(self, add_val, ctx=self.ctx)
var = CaptureVariable(res, ctx=self.ctx)
self.ctx['operations'].append(
CaptureVariableAssign(variable=var, value=res, ctx=self.ctx))
return var
def __mul__(self, add_val):
res = CaptureMul(self, add_val, ctx=self.ctx)
var = CaptureVariable(res, ctx=self.ctx)
t = CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
self.ctx['operations'].append(t)
return var
def _is_context_empty(self):
return len(self.ctx['operations']) == 0 and len(self.ctx['variables']) == 0
def apply_ops_2(self, dataframe):
# TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
self.ctx['variables'][0].calculated_value = dataframe
for op in self.ctx['operations']:
op.execute()
@property
def columns(self):
self.apply_ops_2(self.ctx['schema_df'])
value = self.execute()
return value.columns
# TODO(VitalyFedyunin): Add tests
# TODO(VitalyFedyunin): Need to join contexts if one of them is empty because we used capture
def __call__(self, *args, **kwargs):
# TODO: Check if args or kwargs have more than one different context
if self._is_context_empty():
# TODO: Allow CaptureA to take context from mock
for arg in args:
if isinstance(arg, Capture) and not arg._is_context_empty():
self.ctx = arg.ctx
break
if self._is_context_empty():
for k, v in kwargs.items():
if isinstance(k, Capture) and not k._is_context_empty():
self.ctx = k.ctx
break
if isinstance(v, Capture) and not v._is_context_empty():
self.ctx = v.ctx
break
res = CaptureCall(self, ctx=self.ctx, args=args, kwargs=kwargs)
var = CaptureVariable(None, ctx=self.ctx)
t = CaptureVariableAssign(ctx=self.ctx, variable=var, value=res)
self.ctx['operations'].append(t)
return var
class CaptureF(Capture):
def __init__(self, ctx=None, **kwargs):
if ctx is None:
self.ctx = {'operations': [], 'variables': []}
else:
self.ctx = ctx
self.kwargs = kwargs
class CaptureA(CaptureF):
def __str__(self):
return '{name}'.format(name=self.kwargs['name'])
def execute(self):
value = self.kwargs['real_attribute']
return value
class CaptureLikeMock():
def __init__(self, name):
import unittest.mock as mock
# TODO(VitalyFedyunin): Do not use a private function here; copy our own implementation instead.
get_target, attribute = mock._get_target(name) # type: ignore[attr-defined]
self.get_target = get_target
self.attribute = attribute
self.name = name
def __enter__(self):
self.save = getattr(self.get_target(), self.attribute)
capt = CaptureA(name=self.name, real_attribute=self.save)
setattr(self.get_target(), self.attribute, capt)
def __exit__(self, *exc_info):
setattr(self.get_target(), self.attribute, self.save)
class CaptureCall(Capture):
def __init__(self, callable, ctx=None, **kwargs):
if ctx is None:
self.ctx = {'operations': [], 'variables': []}
else:
self.ctx = ctx
self.kwargs = kwargs
self.callable = callable
def __str__(self):
return "{callable}({args},{kwargs})".format(callable=self.callable, **self.kwargs)
def execute(self):
# TODO(VitalyFedyunin): Execute kwargs and maybe nested structures
executed_args = []
for arg in self.kwargs['args']:
if isinstance(arg, Capture):
executed_args.append(arg.execute())
else:
executed_args.append(arg)
left = get_val(self.callable)
return left(*executed_args, **self.kwargs['kwargs'])
class CaptureVariableAssign(CaptureF):
def __str__(self):
variable = self.kwargs['variable']
value = self.kwargs['value']
return "{variable} = {value}".format(variable=variable, value=value)
def execute(self):
self.kwargs['variable'].calculated_value = self.kwargs['value'].execute()
class CaptureVariable(Capture):
# TODO(VitalyFedyunin): This should be atomic and thread safe
names_idx = 0
def __init__(self, value, ctx):
if CaptureControl.disabled:
raise Exception('Attempting to create capture variable with capture off')
self.ctx = ctx
self.value = value
self.name = 'var_%s' % CaptureVariable.names_idx
CaptureVariable.names_idx += 1
self.ctx['variables'].append(self)
def __str__(self):
return self.name
def execute(self):
return self.calculated_value
def apply_ops(self, dataframe):
# TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
self.ctx['variables'][0].calculated_value = dataframe
for op in self.ctx['operations']:
op.execute()
return self.calculated_value
class CaptureGetItem(Capture):
def __init__(self, left, key, ctx):
self.ctx = ctx
self.left = left
self.key = key
def __str__(self):
return "%s[%s]" % (self.left, get_val(self.key))
def execute(self):
left = self.left.execute()
return left[self.key]
class CaptureSetItem(Capture):
def __init__(self, left, key, value, ctx):
self.ctx = ctx
self.left = left
self.key = key
self.value = value
def __str__(self):
return "%s[%s] = %s" % (self.left, get_val(self.key), self.value)
def execute(self):
left = self.left.execute()
value = self.value.execute()
left[self.key] = value
class CaptureAdd(Capture):
def __init__(self, left, right, ctx):
self.ctx = ctx
self.left = left
self.right = right
def __str__(self):
return "%s + %s" % (self.left, self.right)
def execute(self):
return get_val(self.left) + get_val(self.right)
class CaptureMul(Capture):
def __init__(self, left, right, ctx):
self.ctx = ctx
self.left = left
self.right = right
def __str__(self):
return "%s * %s" % (self.left, self.right)
def execute(self):
return get_val(self.left) * get_val(self.right)
class CaptureSub(Capture):
def __init__(self, left, right, ctx):
self.ctx = ctx
self.left = left
self.right = right
def __str__(self):
return "%s - %s" % (self.left, self.right)
def execute(self):
return get_val(self.left) - get_val(self.right)
class CaptureGetAttr(Capture):
def __init__(self, src, name, ctx):
self.ctx = ctx
self.src = src
self.name = name
def __str__(self):
return "%s.%s" % (self.src, self.name)
def execute(self):
val = get_val(self.src)
return getattr(val, self.name)
def get_val(capture):
if isinstance(capture, Capture):
return capture.execute()
elif isinstance(capture, str):
return '"%s"' % capture
else:
return capture
class CaptureInitial(CaptureVariable):
def __init__(self, schema_df=None):
new_ctx: Dict[str, List[Any]] = {'operations': [], 'variables': [], 'schema_df': schema_df}
super().__init__(None, new_ctx)
self.name = 'input_%s' % self.name
class CaptureDataFrame(CaptureInitial):
pass
class CaptureDataFrameWithDataPipeOps(CaptureDataFrame):
def as_datapipe(self):
return DataFrameTracedOps(
self.ctx['variables'][0].source_datapipe, self)
def raw_iterator(self):
return self.as_datapipe().__iter__()
def __iter__(self):
return iter(self._dataframes_as_tuples())
def batch(self, batch_size=10, drop_last: bool = False, wrapper_class=DataChunkDF):
dp = self._dataframes_per_row()._dataframes_concat(batch_size)
dp = dp.as_datapipe().batch(1, drop_last=drop_last, wrapper_class=wrapper_class)
dp._dp_contains_dataframe = True
return dp
def groupby(self,
group_key_fn,
*,
buffer_size=10000,
group_size=None,
guaranteed_group_size=None,
drop_remaining=False):
dp = self._dataframes_per_row()
dp = dp.as_datapipe().groupby(group_key_fn, buffer_size=buffer_size, group_size=group_size,
guaranteed_group_size=guaranteed_group_size, drop_remaining=drop_remaining)
return dp
def shuffle(self, *args, **kwargs):
return self._dataframes_shuffle(*args, **kwargs)
def filter(self, *args, **kwargs):
return self._dataframes_filter(*args, **kwargs)
def collate(self, *args, **kwargs):
raise Exception("Can't collate unbatched DataFrames stream")
def __getattr__(self, attrname): # ?
if attrname in UNIMPLEMENTED_ATTR:
raise AttributeError('Attempting to get ', attrname)
if attrname in DATAPIPES_OPS:
return (self.as_datapipe()).__getattr__(attrname)
return super().__getattr__(attrname)
@functional_datapipe('trace_as_dataframe')
class DataFrameTracer(CaptureDataFrameWithDataPipeOps, IterDataPipe):
source_datapipe = None
# TODO(VitalyFedyunin): Must implement all special functions of datapipes
def set_shuffle_settings(self, *args, **kwargs):
pass
def is_shardable(self):
return False
def __init__(self, source_datapipe, schema_df=None):
self.source_datapipe = source_datapipe
if schema_df is None:
schema_df = next(iter(self.source_datapipe))
super().__init__(schema_df=schema_df)
|
pytorch-master
|
torch/utils/data/datapipes/dataframe/dataframes.py
|
from io import IOBase
from typing import Iterable, Tuple, Optional
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import get_file_binaries_from_pathnames, _deprecation_warning
__all__ = [
"FileOpenerIterDataPipe",
"FileLoaderIterDataPipe",
]
@functional_datapipe("open_files")
class FileOpenerIterDataPipe(IterDataPipe[Tuple[str, IOBase]]):
r"""
Given pathnames, opens files and yields a tuple of pathname and file stream
(functional name: ``open_files``).
Args:
datapipe: Iterable datapipe that provides pathnames
mode: An optional string that specifies the mode in which
the file is opened by ``open()``. It defaults to ``r``, other options are
``b`` for reading in binary mode and ``t`` for text mode.
encoding: An optional string that specifies the encoding of the
underlying file. It defaults to ``None`` to match the default encoding of ``open``.
length: Nominal length of the datapipe
Note:
The opened file handles will be closed by Python's GC periodically. Users can choose
to close them explicitly.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader
>>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt'))
>>> dp = FileOpener(dp)
>>> dp = StreamReader(dp)
>>> list(dp)
[('./abc.txt', 'abc')]
"""
def __init__(
self,
datapipe: Iterable[str],
mode: str = 'r',
encoding: Optional[str] = None,
length: int = -1):
super().__init__()
self.datapipe: Iterable = datapipe
self.mode: str = mode
self.encoding: Optional[str] = encoding
if self.mode not in ('b', 't', 'rb', 'rt', 'r'):
raise ValueError("Invalid mode {}".format(mode))
# TODO: enforce typing for each instance based on mode, otherwise
# `argument_validation` with this DataPipe may be potentially broken
if 'b' in mode and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
self.length: int = length
# Remove the annotation because 'IOBase' is a general type and the true type
# is determined at runtime based on mode. Some `DataPipe` implementations requiring
# a subtype would otherwise cause a mypy error.
def __iter__(self):
yield from get_file_binaries_from_pathnames(self.datapipe, self.mode, self.encoding)
def __len__(self):
if self.length == -1:
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
return self.length
class FileLoaderIterDataPipe(IterDataPipe[Tuple[str, IOBase]]):
def __new__(
cls,
datapipe: Iterable[str],
mode: str = 'b',
length: int = -1):
_deprecation_warning(
cls.__name__,
deprecation_version="1.12",
removal_version="1.13",
new_class_name="FileOpener",
)
return FileOpenerIterDataPipe(datapipe=datapipe, mode=mode, length=length)
|
pytorch-master
|
torch/utils/data/datapipes/iter/fileopener.py
|
import functools
from collections import namedtuple
from typing import Callable, Iterator, Sized, TypeVar, Optional, Union, Any, Dict, List
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data._utils.collate import default_collate
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn
__all__ = [
"CollatorIterDataPipe",
"MapperIterDataPipe",
]
T_co = TypeVar("T_co", covariant=True)
@functional_datapipe("map")
class MapperIterDataPipe(IterDataPipe[T_co]):
r"""
Applies a function over each item from the source DataPipe (functional name: ``map``).
The function can be any regular Python function or partial object. Lambda
function is not recommended as it is not supported by pickle.
Args:
datapipe: Source Iterable DataPipe
fn: Function being applied over each item
input_col: Index or indices of data which ``fn`` is applied, such as:
- ``None`` as default to apply ``fn`` to the data directly.
- Integer(s) is used for list/tuple.
- Key(s) is used for dict.
output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified
only when ``input_col`` is not ``None``
- ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with
multiple indices, the left-most one is used, and other indices will be removed.
- Integer is used for list/tuple. ``-1`` represents appending the result at the end.
- Key is used for dict. New key is acceptable.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper, Mapper
>>> def add_one(x):
... return x + 1
>>> dp = IterableWrapper(range(10))
>>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred
>>> list(map_dp_1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle`
>>> # Use `functools.partial` or explicitly define the function instead
>>> map_dp_2 = Mapper(dp, lambda x: x + 1)
>>> list(map_dp_2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
datapipe: IterDataPipe
fn: Callable
def __init__(
self,
datapipe: IterDataPipe,
fn: Callable,
input_col=None,
output_col=None,
) -> None:
super().__init__()
self.datapipe = datapipe
_check_unpickable_fn(fn)
self.fn = fn # type: ignore[assignment]
self.input_col = input_col
if input_col is None and output_col is not None:
raise ValueError("`output_col` must be None when `input_col` is None.")
if isinstance(output_col, (list, tuple)):
if len(output_col) > 1:
raise ValueError("`output_col` must be a single-element list or tuple")
output_col = output_col[0]
self.output_col = output_col
def _apply_fn(self, data):
if self.input_col is None and self.output_col is None:
return self.fn(data)
if self.input_col is None:
res = self.fn(data)
elif isinstance(self.input_col, (list, tuple)):
args = tuple(data[col] for col in self.input_col)
res = self.fn(*args)
else:
res = self.fn(data[self.input_col])
# Copy tuple to list and run in-place modification because tuple is immutable.
if isinstance(data, tuple):
t_flag = True
data = list(data)
else:
t_flag = False
if self.output_col is None:
if isinstance(self.input_col, (list, tuple)):
data[self.input_col[0]] = res
for idx in sorted(self.input_col[1:], reverse=True):
del data[idx]
else:
data[self.input_col] = res
else:
if self.output_col == -1:
data.append(res)
else:
data[self.output_col] = res
# Convert list back to tuple
return tuple(data) if t_flag else data
def __iter__(self) -> Iterator[T_co]:
for data in self.datapipe:
yield self._apply_fn(data)
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError(
"{} instance doesn't have valid length".format(type(self).__name__)
)
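# ``input_col`` / ``output_col`` sketch (illustrative, complementing the docstring above):
#   >>> # xdoctest: +SKIP
#   >>> dp = IterableWrapper([(0, 10), (1, 11)])
#   >>> list(dp.map(fn=str, input_col=1, output_col=-1))
#   [(0, 10, '10'), (1, 11, '11')]
# ``fn`` is applied only to index 1 of each tuple and, because ``output_col`` is ``-1``,
# its result is appended as a new trailing element.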
def _collate_helper(conversion, item):
# TODO(VitalyFedyunin): Verify that item is any sort of batch
if len(item.items) > 1:
# TODO(VitalyFedyunin): Compact all batch dataframes into one
raise Exception("Only supports one DataFrame per batch")
df = item[0]
columns_name = df_wrapper.get_columns(df)
tuple_names: List = []
tuple_values: List = []
for name in conversion.keys():
if name not in columns_name:
raise Exception("Conversion keys missmatch")
for name in columns_name:
if name in conversion:
if not callable(conversion[name]):
raise Exception('Collate (DF)DataPipe requires callable as dict values')
collation_fn = conversion[name]
else:
# TODO(VitalyFedyunin): Add default collation into df_wrapper
try:
import torcharrow.pytorch as tap # type: ignore[import]
collation_fn = tap.rec.Default()
except Exception:
raise Exception("unable to import default collation function from the TorchArrrow")
tuple_names.append(str(name))
value = collation_fn(df[name])
tuple_values.append(value)
# TODO(VitalyFedyunin): We can dynamically extract types from the tuple_values here
# TODO(VitalyFedyunin): Instead of ignoring mypy error, make sure tuple_names is not empty
tpl_cls = namedtuple("CollateResult", tuple_names) # type: ignore[misc]
tuple = tpl_cls(*tuple_values)
return tuple
@functional_datapipe("collate")
class CollatorIterDataPipe(MapperIterDataPipe):
r"""
Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``).
By default, it uses :func:`torch.utils.data.default_collate`.
.. note::
While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the
default behavior and `functools.partial` to specify any additional arguments.
Args:
datapipe: Iterable DataPipe being collated
collate_fn: Customized collate function to collect and combine data or a batch of data.
Default function collates to Tensor(s) based on data type.
Example: Convert integer data to float Tensor
>>> class MyIterDataPipe(torch.utils.data.IterDataPipe):
... def __init__(self, start, end):
... super(MyIterDataPipe).__init__()
... assert end > start, "this example code only works with end >= start"
... self.start = start
... self.end = end
...
... def __iter__(self):
... return iter(range(self.start, self.end))
...
... def __len__(self):
... return self.end - self.start
...
>>> ds = MyIterDataPipe(start=3, end=7)
>>> print(list(ds))
[3, 4, 5, 6]
>>> def collate_fn(batch):
... return torch.tensor(batch, dtype=torch.float)
...
>>> # xdoctest: +SKIP
>>> collated_ds = CollateIterDataPipe(ds, collate_fn=collate_fn)
>>> print(list(collated_ds))
[tensor(3.), tensor(4.), tensor(5.), tensor(6.)]
"""
def __init__(
self,
datapipe: IterDataPipe,
conversion: Optional[
Union[
Callable[..., Any],
Dict[Union[str, Any], Union[Callable, Any]],
]
] = default_collate,
collate_fn: Optional[Callable] = None,
) -> None:
# TODO(VitalyFedyunin): Replace `Callable[..., Any]` with `Callable[[IColumn], Any]`
# TODO(VitalyFedyunin): Replace with `Dict[Union[str, IColumn], Union[Callable, Enum]]`
if collate_fn is not None:
super().__init__(datapipe, fn=collate_fn)
else:
if callable(conversion):
super().__init__(datapipe, fn=conversion)
else:
# TODO(VitalyFedyunin): Validate passed dictionary
collate_fn = functools.partial(_collate_helper, conversion)
super().__init__(datapipe, fn=collate_fn)
|
pytorch-master
|
torch/utils/data/datapipes/iter/callable.py
|
from collections import defaultdict
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe, DataChunk
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn
from typing import Any, Callable, DefaultDict, Iterator, List, Optional, Sized, TypeVar
__all__ = [
"BatcherIterDataPipe",
"GrouperIterDataPipe",
"ShardingFilterIterDataPipe",
"UnBatcherIterDataPipe",
]
T_co = TypeVar('T_co', covariant=True)
@functional_datapipe('sharding_filter')
class ShardingFilterIterDataPipe(IterDataPipe):
r"""
Wrapper that allows DataPipe to be sharded (functional name: ``sharding_filter``). After ``apply_sharding`` is
called, each instance of the DataPipe (on different workers) will have every `n`-th element of the
original DataPipe, where `n` equals the number of instances.
Args:
source_datapipe: Iterable DataPipe that will be sharded
"""
def __init__(self, source_datapipe: IterDataPipe):
self.source_datapipe = source_datapipe
self.num_of_instances = 1
self.instance_id = 0
def is_shardable(self):
return True
def apply_sharding(self, num_of_instances, instance_id):
self.num_of_instances = num_of_instances
self.instance_id = instance_id
def __iter__(self):
for i, item in enumerate(self.source_datapipe):
if i % self.num_of_instances == self.instance_id:
yield item
def __len__(self):
if isinstance(self.source_datapipe, Sized):
return len(self.source_datapipe) // self.num_of_instances +\
(1 if (self.instance_id < len(self.source_datapipe) % self.num_of_instances) else 0)
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
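# Sharding semantics sketch (illustrative; assumes ``IterableWrapper`` as the source):
#   >>> # xdoctest: +SKIP
#   >>> dp = IterableWrapper(range(10)).sharding_filter()
#   >>> dp.apply_sharding(num_of_instances=2, instance_id=0)
#   >>> list(dp)
#   [0, 2, 4, 6, 8]
# A second instance created with ``instance_id=1`` would yield [1, 3, 5, 7, 9].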
@functional_datapipe('batch')
class BatcherIterDataPipe(IterDataPipe[DataChunk]):
r"""
Creates mini-batches of data (functional name: ``batch``). An outer dimension will be added:
each batch contains ``batch_size`` elements; if ``drop_last`` is set to ``False``, the last
batch may instead contain ``length % batch_size`` elements.
Args:
datapipe: Iterable DataPipe being batched
batch_size: The size of each batch
drop_last: Option to drop the last batch if it's not full
wrapper_class: wrapper to apply onto each batch (type ``List``) before yielding,
defaults to ``DataChunk``
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> dp = dp.batch(batch_size=3, drop_last=True)
>>> list(dp)
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
datapipe: IterDataPipe
batch_size: int
drop_last: bool
length: Optional[int]
def __init__(self,
datapipe: IterDataPipe,
batch_size: int,
drop_last: bool = False,
wrapper_class=DataChunk,
) -> None:
assert batch_size > 0, "Batch size is required to be larger than 0!"
super().__init__()
self.datapipe = datapipe
self.batch_size = batch_size
self.drop_last = drop_last
self.length = None
self.wrapper_class = wrapper_class
def __iter__(self) -> Iterator[DataChunk]:
batch: List = []
for x in self.datapipe:
batch.append(x)
if len(batch) == self.batch_size:
yield self.wrapper_class(batch)
batch = []
if len(batch) > 0:
if not self.drop_last:
yield self.wrapper_class(batch)
def __len__(self) -> int:
if self.length is not None:
return self.length
if isinstance(self.datapipe, Sized):
if self.drop_last:
self.length = len(self.datapipe) // self.batch_size
else:
self.length = (len(self.datapipe) + self.batch_size - 1) // self.batch_size
return self.length
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
@functional_datapipe('unbatch')
class UnBatcherIterDataPipe(IterDataPipe):
r"""
Undoes batching of data (functional name: ``unbatch``). In other words, it flattens the data up to the specified level
within a batched DataPipe.
Args:
datapipe: Iterable DataPipe being un-batched
unbatch_level: Defaults to ``1`` (only flattening the top level). If set to ``2``,
it will flatten the top two levels, and ``-1`` will flatten the entire DataPipe.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]])
>>> dp1 = source_dp.unbatch()
>>> list(dp1)
[[0, 1], [2], [3, 4], [5], [6]]
>>> dp2 = source_dp.unbatch(unbatch_level=2)
>>> list(dp2)
[0, 1, 2, 3, 4, 5, 6]
"""
def __init__(self,
datapipe: IterDataPipe,
unbatch_level: int = 1):
self.datapipe = datapipe
self.unbatch_level = unbatch_level
def __iter__(self):
for element in self.datapipe:
for i in self._dive(element, unbatch_level=self.unbatch_level):
yield i
def _dive(self, element, unbatch_level):
if unbatch_level < -1:
raise ValueError("unbatch_level must be -1 or >= 0")
if unbatch_level == -1:
if isinstance(element, list) or isinstance(element, DataChunk):
for item in element:
for i in self._dive(item, unbatch_level=-1):
yield i
else:
yield element
elif unbatch_level == 0:
yield element
else:
if isinstance(element, list) or isinstance(element, DataChunk):
for item in element:
for i in self._dive(item, unbatch_level=unbatch_level - 1):
yield i
else:
raise IndexError(f"unbatch_level {self.unbatch_level} exceeds the depth of the DataPipe")
@functional_datapipe('groupby')
class GrouperIterDataPipe(IterDataPipe[DataChunk]):
r"""
Groups data from input IterDataPipe by keys which are generated from ``group_key_fn``,
and yields a ``DataChunk`` with batch size up to ``group_size`` if defined (functional name: ``groupby``).
The samples are read sequentially from the source ``datapipe``, and a batch of samples belonging to the same group
will be yielded as soon as the size of the batch reaches ``group_size``. When the buffer is full,
the DataPipe will yield the largest batch with the same key, provided that its size is larger
than ``guaranteed_group_size``. If its size is smaller, it will be dropped if ``drop_remaining=True``.
After iterating through the entirety of source ``datapipe``, everything not dropped due to the buffer capacity
will be yielded from the buffer, even if the group sizes are smaller than ``guaranteed_group_size``.
Args:
datapipe: Iterable datapipe to be grouped
group_key_fn: Function used to generate group key from the data of the source datapipe
buffer_size: The size of buffer for ungrouped data
group_size: The max size of each group, a batch is yielded as soon as it reaches this size
guaranteed_group_size: The guaranteed minimum group size to be yielded in case the buffer is full
drop_remaining: Specifies if the group smaller than ``guaranteed_group_size`` will be dropped from buffer
when the buffer is full
Example:
>>> import os
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> def group_fn(file):
... return os.path.basename(file).split(".")[0]
>>> source_dp = IterableWrapper(["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"])
>>> dp0 = source_dp.groupby(group_key_fn=group_fn)
>>> list(dp0)
[['a.png', 'a.json', 'a.jpg'], ['b.png', 'b.json'], ['c.json']]
>>> # A group is yielded as soon as its size equals `group_size`
>>> dp1 = source_dp.groupby(group_key_fn=group_fn, group_size=2)
>>> list(dp1)
[['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
>>> # Scenario where `buffer` is full, and group 'a' needs to be yielded since its size > `guaranteed_group_size`
>>> dp2 = source_dp.groupby(group_key_fn=group_fn, buffer_size=3, group_size=3, guaranteed_group_size=2)
>>> list(dp2)
[['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
"""
def __init__(self,
datapipe: IterDataPipe[T_co],
group_key_fn: Callable,
*,
buffer_size: int = 10000,
group_size: Optional[int] = None,
guaranteed_group_size: Optional[int] = None,
drop_remaining: bool = False):
_check_unpickable_fn(group_key_fn)
self.datapipe = datapipe
self.group_key_fn = group_key_fn
self.max_buffer_size = buffer_size
self.buffer_elements: DefaultDict[Any, List] = defaultdict(list)
self.curr_buffer_size = 0
self.group_size = group_size
self.guaranteed_group_size = None
if group_size is not None and buffer_size is not None:
assert 0 < group_size <= buffer_size
self.guaranteed_group_size = group_size
if guaranteed_group_size is not None:
assert group_size is not None and 0 < guaranteed_group_size <= group_size
self.guaranteed_group_size = guaranteed_group_size
self.drop_remaining = drop_remaining
self.wrapper_class = DataChunk
def _remove_biggest_key(self):
biggest_key = None
biggest_size = 0
result_to_yield = None
for findkey in self.buffer_elements.keys():
if len(self.buffer_elements[findkey]) > biggest_size:
biggest_size = len(self.buffer_elements[findkey])
biggest_key = findkey
if self.guaranteed_group_size is not None and biggest_size < self.guaranteed_group_size and not self.drop_remaining:
raise RuntimeError('Failed to group items', str(self.buffer_elements[biggest_key]))
if self.guaranteed_group_size is None or biggest_size >= self.guaranteed_group_size:
result_to_yield = self.buffer_elements[biggest_key]
self.curr_buffer_size -= biggest_size
del self.buffer_elements[biggest_key]
return result_to_yield
def __iter__(self):
for x in self.datapipe:
key = self.group_key_fn(x)
self.buffer_elements[key].append(x)
self.curr_buffer_size += 1
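            # Eagerly yield a group as soon as it reaches `group_size`, freeing buffer space.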
if self.group_size is not None and self.group_size == len(self.buffer_elements[key]):
yield self.wrapper_class(self.buffer_elements[key])
self.curr_buffer_size -= len(self.buffer_elements[key])
del self.buffer_elements[key]
if self.curr_buffer_size == self.max_buffer_size:
result_to_yield = self._remove_biggest_key()
if result_to_yield is not None:
yield self.wrapper_class(result_to_yield)
for key in tuple(self.buffer_elements.keys()):
res = self.buffer_elements.pop(key)
self.curr_buffer_size -= len(res)
yield self.wrapper_class(res)
def reset(self) -> None:
self.curr_buffer_size = 0
self.buffer_elements = defaultdict(list)
def __getstate__(self):
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(self)
state = (
self.datapipe,
self.group_key_fn,
self.max_buffer_size,
self.group_size,
self.guaranteed_group_size,
self.drop_remaining,
self.wrapper_class,
self._valid_iterator_id,
self._number_of_samples_yielded,
)
return state
def __setstate__(self, state):
(
self.datapipe,
self.group_key_fn,
self.max_buffer_size,
self.group_size,
self.guaranteed_group_size,
self.drop_remaining,
self.wrapper_class,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self.curr_buffer_size = 0
self.buffer_elements = defaultdict(list)
def __del__(self):
self.buffer_elements.clear()
|
pytorch-master
|
torch/utils/data/datapipes/iter/grouping.py
|
from torch.utils.data.datapipes.iter.utils import (
IterableWrapperIterDataPipe as IterableWrapper,
)
from torch.utils.data.datapipes.iter.callable import (
CollatorIterDataPipe as Collator,
MapperIterDataPipe as Mapper,
)
from torch.utils.data.datapipes.iter.combinatorics import (
SamplerIterDataPipe as Sampler,
ShufflerIterDataPipe as Shuffler,
)
from torch.utils.data.datapipes.iter.combining import (
ConcaterIterDataPipe as Concater,
DemultiplexerIterDataPipe as Demultiplexer,
ForkerIterDataPipe as Forker,
MultiplexerIterDataPipe as Multiplexer,
ZipperIterDataPipe as Zipper,
)
from torch.utils.data.datapipes.iter.filelister import (
FileListerIterDataPipe as FileLister,
)
from torch.utils.data.datapipes.iter.fileopener import (
FileLoaderIterDataPipe as FileLoader,
FileOpenerIterDataPipe as FileOpener,
)
from torch.utils.data.datapipes.iter.grouping import (
BatcherIterDataPipe as Batcher,
GrouperIterDataPipe as Grouper,
ShardingFilterIterDataPipe as ShardingFilter,
UnBatcherIterDataPipe as UnBatcher,
)
from torch.utils.data.datapipes.iter.routeddecoder import (
RoutedDecoderIterDataPipe as RoutedDecoder,
)
from torch.utils.data.datapipes.iter.selecting import (
FilterIterDataPipe as Filter,
)
from torch.utils.data.datapipes.iter.streamreader import (
StreamReaderIterDataPipe as StreamReader,
)
__all__ = ['Batcher',
'Collator',
'Concater',
'Demultiplexer',
'FileLister',
'FileLoader',
'FileOpener',
'Filter',
'Forker',
'Grouper',
'IterableWrapper',
'Mapper',
'Multiplexer',
'RoutedDecoder',
'Sampler',
'ShardingFilter',
'Shuffler',
'StreamReader',
'UnBatcher',
'Zipper']
# Please keep this list sorted
assert __all__ == sorted(__all__)
|
pytorch-master
|
torch/utils/data/datapipes/iter/__init__.py
|
from typing import Callable, Iterator, Optional, TypeVar
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
from torch.utils.data.datapipes.utils.common import (
_check_unpickable_fn,
_deprecation_warning,
StreamWrapper,
)
__all__ = ["FilterIterDataPipe", ]
T_co = TypeVar('T_co', covariant=True)
@functional_datapipe('filter')
class FilterIterDataPipe(IterDataPipe[T_co]):
r"""
Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``).
Args:
datapipe: Iterable DataPipe being filtered
filter_fn: Customized function mapping an element to a boolean.
drop_empty_batches (Deprecated): By default, drops a batch if it is empty after filtering instead of keeping an empty list
input_col: Index or indices of data which ``filter_fn`` is applied, such as:
- ``None`` as default to apply ``filter_fn`` to the data directly.
- Integer(s) is used for list/tuple.
- Key(s) is used for dict.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> def is_even(n):
... return n % 2 == 0
>>> dp = IterableWrapper(range(5))
>>> filter_dp = dp.filter(filter_fn=is_even)
>>> list(filter_dp)
[0, 2, 4]
"""
datapipe: IterDataPipe
filter_fn: Callable
drop_empty_batches: bool
def __init__(
self,
datapipe: IterDataPipe,
filter_fn: Callable,
drop_empty_batches: Optional[bool] = None,
input_col=None,
) -> None:
super().__init__()
self.datapipe = datapipe
_check_unpickable_fn(filter_fn)
self.filter_fn = filter_fn # type: ignore[assignment]
if drop_empty_batches is None:
drop_empty_batches = True
else:
_deprecation_warning(
type(self).__name__,
deprecation_version="1.12",
removal_version="1.14",
old_argument_name="drop_empty_batches",
)
self.drop_empty_batches = drop_empty_batches
self.input_col = input_col
def _apply_filter_fn(self, data) -> bool:
if self.input_col is None:
return self.filter_fn(data)
elif isinstance(self.input_col, (list, tuple)):
args = tuple(data[col] for col in self.input_col)
return self.filter_fn(*args)
else:
return self.filter_fn(data[self.input_col])
def __iter__(self) -> Iterator[T_co]:
for data in self.datapipe:
filtered = self._returnIfTrue(data)
if self._isNonEmpty(filtered):
yield filtered
else:
StreamWrapper.close_streams(data)
def _returnIfTrue(self, data):
condition = self._apply_filter_fn(data)
if df_wrapper.is_column(condition):
# We are operating on DataFrames filter here
result = []
for idx, mask in enumerate(df_wrapper.iterate(condition)):
if mask:
result.append(df_wrapper.get_item(data, idx))
if len(result):
return df_wrapper.concat(result)
else:
return None
if not isinstance(condition, bool):
raise ValueError("Boolean output is required for `filter_fn` of FilterIterDataPipe, got", type(condition))
if condition:
return data
def _isNonEmpty(self, data):
if df_wrapper.is_dataframe(data):
return True
r = data is not None and \
not (isinstance(data, list) and len(data) == 0 and self.drop_empty_batches)
return r
|
pytorch-master
|
torch/utils/data/datapipes/iter/selecting.py
|
import warnings
from collections import deque
from typing import Any, Callable, Iterator, List, Optional, Sized, Tuple, TypeVar, Deque
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes._hook_iterator import _SnapshotState
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import StreamWrapper, _check_unpickable_fn
__all__ = [
"ConcaterIterDataPipe",
"DemultiplexerIterDataPipe",
"ForkerIterDataPipe",
"MultiplexerIterDataPipe",
"ZipperIterDataPipe",
]
T_co = TypeVar('T_co', covariant=True)
@functional_datapipe('concat')
class ConcaterIterDataPipe(IterDataPipe):
r"""
Concatenates multiple Iterable DataPipes (functional name: ``concat``). The resulting DataPipe will
yield all the elements from the first input DataPipe, before yielding from the subsequent ones.
Args:
datapipes: Iterable DataPipes being concatenated
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> import random
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp1 = IterableWrapper(range(3))
>>> dp2 = IterableWrapper(range(5))
>>> list(dp1.concat(dp2))
[0, 1, 2, 0, 1, 2, 3, 4]
"""
datapipes: Tuple[IterDataPipe]
length: Optional[int]
def __init__(self, *datapipes: IterDataPipe):
if len(datapipes) == 0:
raise ValueError("Expected at least one DataPipe, but got nothing")
if not all(isinstance(dp, IterDataPipe) for dp in datapipes):
raise TypeError("Expected all inputs to be `IterDataPipe`")
self.datapipes = datapipes # type: ignore[assignment]
self.length = None
def __iter__(self) -> Iterator:
for dp in self.datapipes:
for data in dp:
yield data
def __len__(self) -> int:
if self.length is not None:
if self.length == -1:
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
return self.length
if all(isinstance(dp, Sized) for dp in self.datapipes):
self.length = sum(len(dp) for dp in self.datapipes)
else:
self.length = -1
return len(self)
@functional_datapipe('fork')
class ForkerIterDataPipe(IterDataPipe):
r"""
Creates multiple instances of the same Iterable DataPipe (functional name: ``fork``).
Args:
datapipe: Iterable DataPipe being copied
num_instances: number of instances of the datapipe to create
buffer_size: this restricts how far ahead the leading child DataPipe
can read relative to the slowest child DataPipe.
Defaults to ``1000``. Use ``-1`` for the unlimited buffer.
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(range(5))
>>> dp1, dp2 = source_dp.fork(num_instances=2)
>>> list(dp1)
[0, 1, 2, 3, 4]
>>> list(dp2)
[0, 1, 2, 3, 4]
"""
def __new__(cls, datapipe: IterDataPipe, num_instances: int, buffer_size: int = 1000):
if num_instances < 1:
raise ValueError(f"Expected `num_instaces` larger than 0, but {num_instances} is found")
if num_instances == 1:
return datapipe
container = _ForkerIterDataPipe(datapipe, num_instances, buffer_size)
return [_ChildDataPipe(container, i) for i in range(num_instances)]
class _ForkerIterDataPipe(IterDataPipe):
r"""
Container to hold instance-specific information on behalf of ForkerIterDataPipe. It tracks
the state of its child DataPipes, maintains the buffer, and yields the next value
as requested by the child DataPipes.
"""
def __init__(self, datapipe: IterDataPipe, num_instances: int, buffer_size: int = 1000):
self.main_datapipe = datapipe
self._datapipe_iterator: Optional[Iterator[Any]] = None
self.num_instances = num_instances
self.buffer: Deque = deque()
self.buffer_size = buffer_size
if self.buffer_size < 0:
warnings.warn(
"Unlimited buffer size is set for `fork`, "
"please be aware of OOM at random places",
UserWarning
)
self.child_pointers: List[int] = [0] * num_instances # Indicate the indices of the next element to get
self.slowest_ptr = 0 # The index to read by the slowest child
self.leading_ptr = 0 # The index to read by the fastest child
self.end_ptr: Optional[int] = None # The index to stop child
def __len__(self):
return len(self.main_datapipe)
def get_next_element_by_instance(self, instance_id: int):
if self._datapipe_iterator is None:
self._datapipe_iterator = iter(self.main_datapipe)
self._snapshot_state = _SnapshotState.Iterating
while self.end_ptr is None or self.child_pointers[instance_id] + 1 < self.end_ptr:
self.child_pointers[instance_id] += 1
# Use buffer
if self.buffer and self.child_pointers[instance_id] <= self.leading_ptr:
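                # The buffer holds elements with absolute indices in (slowest_ptr, leading_ptr];
                # translate this child's pointer into an offset within the deque.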
idx = self.child_pointers[instance_id] - self.slowest_ptr - 1
return_val = self.buffer[idx]
            else:  # Retrieve one element from main datapipe
self.leading_ptr = self.child_pointers[instance_id]
try:
return_val = next(self._datapipe_iterator)
self.buffer.append(return_val)
except StopIteration:
self.end_ptr = self.leading_ptr
continue
if self.child_pointers[instance_id] == self.slowest_ptr + 1:
new_min = min(self.child_pointers) # Can optimize by avoiding the call to min()
if self.slowest_ptr < new_min:
self.slowest_ptr = new_min
self.buffer.popleft()
if self.buffer_size >= 0 and self.leading_ptr > self.buffer_size + self.slowest_ptr:
raise BufferError("ForkerIterDataPipe buffer overflow," +
f"buffer size {self.buffer_size} is insufficient.")
yield return_val
if all(p + 1 == self.end_ptr for p in self.child_pointers):
self._datapipe_iterator = None
def is_every_instance_exhausted(self) -> bool:
# Due to the implementation of `get_next_element_by_instance`, `self.end_ptr` will end up
# equaling to `len(main_datapipe) + 1`, hence the check for `self.end_ptr - 1 == ptr` below.
return self.end_ptr is not None and\
all(self.end_ptr == ptr or self.end_ptr - 1 == ptr for ptr in self.child_pointers)
def reset(self) -> None:
self._datapipe_iterator = iter(self.main_datapipe)
self.buffer = deque()
self.child_pointers = [0] * self.num_instances
self.slowest_ptr = 0
self.leading_ptr = 0
self.end_ptr = None
def __getstate__(self):
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(self)
state = (
self.main_datapipe,
self.num_instances,
self.buffer_size,
self._valid_iterator_id,
self._number_of_samples_yielded,
)
return state
def __setstate__(self, state):
(
self.main_datapipe,
self.num_instances,
self.buffer_size,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self._datapipe_iterator = None
self.buffer = deque()
self.child_pointers = [0] * self.num_instances
self.slowest_ptr = 0
self.leading_ptr = 0
self.end_ptr = None
def __del__(self):
self.buffer.clear()
class _ChildDataPipe(IterDataPipe):
r"""
Iterable Datapipe that is a child of a main DataPipe. The instance of this class
will pass its instance_id to get the next value from its main DataPipe.
Note:
ChildDataPipe, like all other IterDataPipe, follows the single iterator per IterDataPipe constraint.
Since ChildDataPipes share a common buffer, when an iterator is created for one of the ChildDataPipes,
the previous iterators for all ChildDataPipes must be invalidated, with the exception when a ChildDataPipe
hasn't had an iterator created from it since the last invalidation. See the example below.
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
        >>> # Single Iterator per IterDataPipe Invalidation
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(range(10))
>>> cdp1, cdp2 = source_dp.fork(num_instances=2)
>>> it1, it2 = iter(cdp1), iter(cdp2)
>>> it3 = iter(cdp1)
>>> # The line above invalidates `it1` and `it2`, and resets `ForkerIterDataPipe`.
>>> it4 = iter(cdp2)
>>> # The line above doesn't invalidate `it3`, because an iterator for `cdp2` hasn't been created since
>>> # the last invalidation.
Args:
main_datapipe: Main DataPipe with a method 'get_next_element_by_instance(instance_id)'
instance_id: integer identifier of this instance
"""
_is_child_datapipe: bool = True
def __init__(self, main_datapipe: IterDataPipe, instance_id: int):
required_attrs = ["get_next_element_by_instance", "is_every_instance_exhausted", "reset"]
required_ops = [getattr(main_datapipe, attr) for attr in required_attrs]
if any(not callable(op) for op in required_ops):
raise NotImplementedError(f"Main Datapipe must have methods {required_attrs} implemented.")
self.main_datapipe: IterDataPipe = main_datapipe
self.instance_id = instance_id
def __iter__(self):
# Note that the logic behind setting iterator ID and `reset` are handled within `hook_iterator`
# We want to separate the code for reset and yield, so that 'reset' executes before __next__ is called
return self.main_datapipe.get_next_element_by_instance(self.instance_id)
def __len__(self):
return len(self.main_datapipe)
# This method is called by `hook_iterator` in `_typing.py`.
def _set_main_datapipe_valid_iterator_id(self) -> int:
r"""
Update the valid iterator ID for both this DataPipe object and `main_datapipe`.
`main_datapipe.reset()` is called when the ID is incremented to a new generation.
"""
# 1. First time any child iterator is created
if self.main_datapipe._valid_iterator_id is None:
self.main_datapipe._valid_iterator_id = 0 # type: ignore[attr-defined]
# 2. This instance was already in the same generation as `main_datapipe`,
# we need to increment the ID further by 1
elif self.main_datapipe._valid_iterator_id == self._valid_iterator_id: # type: ignore[has-type]
self.main_datapipe._valid_iterator_id += 1 # type: ignore[attr-defined]
# Whenever a new generation of iterator is created, the `main_datapipe` must reset
if not self.main_datapipe.is_every_instance_exhausted():
warnings.warn("Some child DataPipes are not exhausted when __iter__ is called. We are resetting "
"the buffer and each child DataPipe will read from the start again.", UserWarning)
self.main_datapipe.reset()
# 3. Otherwise, the iterator is behind the others, so it will just need to catch up by setting
# the instance's iterator to match that of `main_datapipe`
self._valid_iterator_id = self.main_datapipe._valid_iterator_id
return self._valid_iterator_id
# This method is called by `hook_iterator` in `_typing.py`.
def _check_valid_iterator_id(self, iterator_id) -> bool:
r"""
Check the valid iterator ID against that of DataPipe object and that of `main_datapipe`.
"""
return iterator_id == self._valid_iterator_id and iterator_id == self.main_datapipe._valid_iterator_id
@functional_datapipe('demux')
class DemultiplexerIterDataPipe(IterDataPipe):
r"""
Splits the input DataPipe into multiple child DataPipes, using the given
classification function (functional name: ``demux``). A list of the child DataPipes is returned from this operation.
Args:
datapipe: Iterable DataPipe being filtered
num_instances: number of instances of the DataPipe to create
classifier_fn: a function that maps values to an integer within the range ``[0, num_instances - 1]`` or ``None``
drop_none: defaults to ``False``, if ``True``, the function will skip over elements classified as ``None``
buffer_size: this defines the maximum number of inputs that the buffer can hold across all child
DataPipes while waiting for their values to be yielded.
Defaults to ``1000``. Use ``-1`` for the unlimited buffer.
Examples:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> from torchdata.datapipes.iter import IterableWrapper
>>> def odd_or_even(n):
... return n % 2
>>> source_dp = IterableWrapper(range(5))
>>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even)
>>> list(dp1)
[0, 2, 4]
>>> list(dp2)
[1, 3]
>>> # It can also filter out any element that gets `None` from the `classifier_fn`
>>> def odd_or_even_no_zero(n):
... return n % 2 if n != 0 else None
>>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True)
>>> list(dp1)
[2, 4]
>>> list(dp2)
[1, 3]
"""
def __new__(cls, datapipe: IterDataPipe, num_instances: int,
classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool = False, buffer_size: int = 1000):
if num_instances < 1:
raise ValueError(f"Expected `num_instaces` larger than 0, but {num_instances} is found")
_check_unpickable_fn(classifier_fn)
        # When num_instances == 1, demux could be replaced by filter,
        # but it is kept as Demultiplexer for the sake of consistency,
        # such as raising an error when the classification result is out of range
container = _DemultiplexerIterDataPipe(datapipe, num_instances, classifier_fn, drop_none, buffer_size)
return [_ChildDataPipe(container, i) for i in range(num_instances)]
class _DemultiplexerIterDataPipe(IterDataPipe):
r"""
Container to hold instance-specific information on behalf of DemultiplexerIterDataPipe. It tracks
the state of its child DataPipes, maintains the buffer, classifies and yields the next correct value
as requested by the child DataPipes.
"""
def __init__(self, datapipe: IterDataPipe[T_co], num_instances: int,
classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool, buffer_size: int):
self.main_datapipe = datapipe
self._datapipe_iterator: Optional[Iterator[Any]] = None
self.num_instances = num_instances
self.buffer_size = buffer_size
if self.buffer_size < 0:
warnings.warn(
"Unlimited buffer size is set for `demux`, "
"please be aware of OOM at random places",
UserWarning
)
self.current_buffer_usage = 0
self.child_buffers: List[Deque[T_co]] = [deque() for _ in range(num_instances)]
self.classifier_fn = classifier_fn
self.drop_none = drop_none
self.main_datapipe_exhausted = False
def _find_next(self, instance_id: int) -> T_co:
while True:
if self.main_datapipe_exhausted:
raise StopIteration
if self._datapipe_iterator is None:
raise ValueError(
"_datapipe_iterator has not been set, likely because this private method is called directly "
"without invoking get_next_element_by_instance() first.")
value = next(self._datapipe_iterator)
classification = self.classifier_fn(value)
if classification is None and self.drop_none:
StreamWrapper.close_streams(value)
continue
if classification is None or classification >= self.num_instances or classification < 0:
raise ValueError(f"Output of the classification fn should be between 0 and {self.num_instances - 1}. " +
f"{classification} is returned.")
if classification == instance_id:
return value
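            # The element belongs to a different child: buffer it until that child requests it.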
self.child_buffers[classification].append(value)
self.current_buffer_usage += 1
if self.buffer_size >= 0 and self.current_buffer_usage > self.buffer_size:
raise BufferError(
f"DemultiplexerIterDataPipe buffer overflow, buffer size {self.buffer_size} is insufficient.")
def get_next_element_by_instance(self, instance_id: int):
if self._datapipe_iterator is None and not self.main_datapipe_exhausted:
self._datapipe_iterator = iter(self.main_datapipe)
self._snapshot_state = _SnapshotState.Iterating # This is necessary for the DataPipe to reset properly.
stop = False
while not stop:
if self.child_buffers[instance_id]:
self.current_buffer_usage -= 1
yield self.child_buffers[instance_id].popleft()
else:
try:
yield self._find_next(instance_id)
except StopIteration:
stop = True
self.main_datapipe_exhausted = True
self._datapipe_iterator = None
def is_every_instance_exhausted(self) -> bool:
return self.main_datapipe_exhausted and all(not child_buffer for child_buffer in self.child_buffers)
def reset(self) -> None:
self._datapipe_iterator = None
self.current_buffer_usage = 0
self.child_buffers = [deque() for _ in range(self.num_instances)]
self.main_datapipe_exhausted = False
def __getstate__(self):
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(self)
state = (
self.main_datapipe,
self.num_instances,
self.buffer_size,
self.classifier_fn,
self.drop_none,
self._valid_iterator_id,
self._number_of_samples_yielded,
)
return state
def __setstate__(self, state):
(
self.main_datapipe,
self.num_instances,
self.buffer_size,
self.classifier_fn,
self.drop_none,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self._datapipe_iterator = None
self.current_buffer_usage = 0
self.child_buffers = [deque() for _ in range(self.num_instances)]
self.main_datapipe_exhausted = False
def __del__(self):
for dq in self.child_buffers:
dq.clear()
@functional_datapipe('mux')
class MultiplexerIterDataPipe(IterDataPipe):
r"""
Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux``). As in,
one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration,
and so on. It ends when the shortest input DataPipe is exhausted.
Args:
datapipes: Iterable DataPipes that will take turn to yield their elements, until the shortest DataPipe is exhausted
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
>>> list(dp1.mux(dp2, dp3))
[0, 10, 20, 1, 11, 21, 2, 12, 22]
"""
def __init__(self, *datapipes):
self.datapipes = datapipes
self.length: Optional[int] = None
self.buffer: List = [] # Store values to be yielded only when every iterator provides one
def __iter__(self):
iterators = [iter(x) for x in self.datapipes]
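        # Each round gathers one element from every input before yielding them in order;
        # iteration stops as soon as any input is exhausted, discarding the partial buffer.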
while len(iterators):
for it in iterators:
try:
value = next(it)
self.buffer.append(value)
except StopIteration:
self.buffer.clear()
return
for value in self.buffer:
yield value
self.buffer.clear()
def __len__(self):
if self.length is not None:
if self.length == -1:
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
return self.length
if all(isinstance(dp, Sized) for dp in self.datapipes):
self.length = min(len(dp) for dp in self.datapipes) * len(self.datapipes)
else:
self.length = -1
return len(self)
def reset(self) -> None:
self.buffer = []
def __getstate__(self):
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(self)
state = (
self.datapipes,
self.length,
self._valid_iterator_id,
self._number_of_samples_yielded,
)
return state
def __setstate__(self, state):
(
self.datapipes,
self.length,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self.buffer = []
def __del__(self):
self.buffer.clear()
@functional_datapipe('zip')
class ZipperIterDataPipe(IterDataPipe[Tuple[T_co]]):
r"""
Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).
The output is stopped as soon as the shortest input DataPipe is exhausted.
Args:
*datapipes: Iterable DataPipes being aggregated
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
>>> list(dp1.zip(dp2, dp3))
[(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)]
"""
datapipes: Tuple[IterDataPipe]
length: Optional[int]
def __init__(self, *datapipes: IterDataPipe):
if not all(isinstance(dp, IterDataPipe) for dp in datapipes):
raise TypeError("All inputs are required to be `IterDataPipe` "
"for `ZipIterDataPipe`.")
super().__init__()
self.datapipes = datapipes # type: ignore[assignment]
self.length = None
def __iter__(self) -> Iterator[Tuple[T_co]]:
iterators = [iter(datapipe) for datapipe in self.datapipes]
try:
for data in zip(*iterators):
yield data
finally:
unused = []
for iterator in iterators:
try:
unused += list(iterator)
except RuntimeError: # Some iterators may have been invalidated by single iterator constraints
pass
# TODO(VitalyFedyunin): This should be Exception or warning when torchdata.debug is enabled
for item in unused:
StreamWrapper.close_streams(item)
def __len__(self) -> int:
if self.length is not None:
if self.length == -1:
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
return self.length
if all(isinstance(dp, Sized) for dp in self.datapipes):
self.length = min(len(dp) for dp in self.datapipes)
else:
self.length = -1
return len(self)
|
pytorch-master
|
torch/utils/data/datapipes/iter/combining.py
|
from typing import Iterator, List, Sequence, Union
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root
__all__ = ["FileListerIterDataPipe", ]
@functional_datapipe("list_files")
class FileListerIterDataPipe(IterDataPipe[str]):
r"""
Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.
Multiple root directories can be provided.
Args:
root: Root directory or a sequence of root directories
masks: Unix style filter string or string list for filtering file name(s)
recursive: Whether to return pathname from nested directories or not
abspath: Whether to return relative pathname or absolute pathname
non_deterministic: Whether to return pathname in sorted order or not.
If ``False``, the results yielded from each root directory will be sorted
length: Nominal length of the datapipe
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import FileLister
>>> dp = FileLister(root=".", recursive=True)
>>> list(dp)
['example.py', './data/data.tar']
"""
def __init__(
self,
root: Union[str, Sequence[str], IterDataPipe] = '.',
masks: Union[str, List[str]] = '',
*,
recursive: bool = False,
abspath: bool = False,
non_deterministic: bool = False,
length: int = -1
) -> None:
super().__init__()
if isinstance(root, str):
root = [root, ]
if not isinstance(root, IterDataPipe):
root = IterableWrapper(root)
self.datapipe: IterDataPipe = root
self.masks: Union[str, List[str]] = masks
self.recursive: bool = recursive
self.abspath: bool = abspath
self.non_deterministic: bool = non_deterministic
self.length: int = length
def __iter__(self) -> Iterator[str] :
for path in self.datapipe:
yield from get_file_pathnames_from_root(path, self.masks, self.recursive, self.abspath, self.non_deterministic)
def __len__(self):
if self.length == -1:
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
return self.length
|
pytorch-master
|
torch/utils/data/datapipes/iter/filelister.py
|
import copy
import warnings
from torch.utils.data.datapipes.datapipe import IterDataPipe
__all__ = ["IterableWrapperIterDataPipe", ]
class IterableWrapperIterDataPipe(IterDataPipe):
r"""
Wraps an iterable object to create an IterDataPipe.
Args:
iterable: Iterable object to be wrapped into an IterDataPipe
deepcopy: Option to deepcopy input iterable object for each
iterator. The copy is made when the first element is read in ``iter()``.
.. note::
If ``deepcopy`` is explicitly set to ``False``, users should ensure
that the data pipeline doesn't contain any in-place operations over
the iterable instance to prevent data inconsistency across iterations.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> list(dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
def __init__(self, iterable, deepcopy=True):
self.iterable = iterable
self.deepcopy = deepcopy
def __iter__(self):
source_data = self.iterable
if self.deepcopy:
try:
source_data = copy.deepcopy(self.iterable)
# For the case that data cannot be deep-copied,
# all in-place operations will affect iterable variable.
# When this DataPipe is iterated second time, it will
# yield modified items.
except TypeError:
warnings.warn(
"The input iterable can not be deepcopied, "
"please be aware of in-place modification would affect source data."
)
for data in source_data:
yield data
def __len__(self):
return len(self.iterable)
|
pytorch-master
|
torch/utils/data/datapipes/iter/utils.py
|
import random
import torch
from torch.utils.data import Sampler, SequentialSampler
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from typing import Dict, Iterator, List, Optional, Sized, Tuple, Type, TypeVar
__all__ = [
"SamplerIterDataPipe",
"ShufflerIterDataPipe",
]
T_co = TypeVar('T_co', covariant=True)
class SamplerIterDataPipe(IterDataPipe[T_co]):
r"""
Generates sample elements using the provided ``Sampler`` (defaults to :class:`SequentialSampler`).
Args:
datapipe: IterDataPipe to sample from
sampler: Sampler class to generate sample elements from input DataPipe.
Default is :class:`SequentialSampler` for IterDataPipe
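    Example (an illustrative sketch; note that the default :class:`SequentialSampler`
    yields indices, which for ``range(3)`` coincide with the values themselves):
        >>> # xdoctest: +SKIP
        >>> from torch.utils.data.datapipes.iter import IterableWrapper
        >>> dp = IterableWrapper(range(3))
        >>> list(SamplerIterDataPipe(dp))
        [0, 1, 2]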
"""
datapipe: IterDataPipe
sampler: Sampler
def __init__(self,
datapipe: IterDataPipe,
sampler: Type[Sampler] = SequentialSampler,
sampler_args: Optional[Tuple] = None,
sampler_kwargs: Optional[Dict] = None
) -> None:
assert isinstance(datapipe, Sized), \
"Sampler class requires input datapipe implemented `__len__`"
super().__init__()
self.datapipe = datapipe
self.sampler_args = () if sampler_args is None else sampler_args
self.sampler_kwargs = {} if sampler_kwargs is None else sampler_kwargs
# https://github.com/python/mypy/pull/9629 will solve
self.sampler = sampler(data_source=self.datapipe, *self.sampler_args, **self.sampler_kwargs) # type: ignore[misc]
def __iter__(self) -> Iterator[T_co]:
return iter(self.sampler)
def __len__(self) -> int:
# Dataset has been tested as `Sized`
if isinstance(self.sampler, Sized) and len(self.sampler) >= 0:
return len(self.sampler)
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
@functional_datapipe('shuffle')
class ShufflerIterDataPipe(IterDataPipe[T_co]):
r"""
Shuffles the input DataPipe with a buffer (functional name: ``shuffle``). The buffer
with ``buffer_size`` is filled with elements from the datapipe first. Then,
each item will be yielded from the buffer by reservoir sampling via iterator.
``buffer_size`` is required to be larger than ``0``. For ``buffer_size == 1``, the
datapipe is not shuffled. In order to fully shuffle all elements from datapipe,
``buffer_size`` is required to be greater than or equal to the size of datapipe.
When it is used with :class:`torch.utils.data.DataLoader`, the methods to
set up random seed are different based on :attr:`num_workers`.
For single-process mode (:attr:`num_workers == 0`), the random seed is set before
the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
mode (:attr:`num_worker > 0`), `worker_init_fn` is used to set up a random seed
for each worker process.
Args:
datapipe: The IterDataPipe being shuffled
buffer_size: The buffer size for shuffling (default to ``10000``)
unbatch_level: Specifies if it is necessary to unbatch source data before
applying the shuffle
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> shuffle_dp = dp.shuffle()
>>> list(shuffle_dp)
[0, 4, 1, 6, 3, 2, 9, 5, 7, 8]
"""
datapipe: IterDataPipe[T_co]
buffer_size: int
_buffer: List[T_co]
_enabled: bool
_seed: Optional[int]
_rng: random.Random
def __init__(self,
datapipe: IterDataPipe[T_co],
*,
buffer_size: int = 10000,
unbatch_level: int = 0
) -> None:
super().__init__()
# TODO: Performance optimization
# buffer can be a fixed size and remove expensive `append()` and `len()` operations
self._buffer: List[T_co] = []
assert buffer_size > 0, "buffer_size should be larger than 0"
if unbatch_level == 0:
self.datapipe = datapipe
else:
self.datapipe = datapipe.unbatch(unbatch_level=unbatch_level)
self.buffer_size = buffer_size
self._enabled = True
self._seed = None
self._rng = random.Random()
def set_shuffle(self, shuffle=True):
self._enabled = shuffle
return self
def set_seed(self, seed: int):
self._seed = seed
def __iter__(self) -> Iterator[T_co]:
if not self._enabled:
for x in self.datapipe:
yield x
else:
self._rng.seed(self._seed)
self._seed = None
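            # Reservoir-style shuffle: fill the buffer first; once it is full, swap each incoming
            # element with a randomly chosen buffered element and yield the evicted one.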
for x in self.datapipe:
if len(self._buffer) == self.buffer_size:
idx = self._rng.randint(0, len(self._buffer) - 1)
val, self._buffer[idx] = self._buffer[idx], x
yield val
else:
self._buffer.append(x)
self._rng.shuffle(self._buffer)
while self._buffer:
yield self._buffer.pop()
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
def reset(self) -> None:
self._buffer = []
if self._enabled and self._seed is None:
self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
def __getstate__(self):
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(self)
state = (
self.datapipe,
self.buffer_size,
self._enabled,
self._seed,
self._valid_iterator_id,
self._number_of_samples_yielded,
self._rng.getstate(),
)
return state
def __setstate__(self, state):
(
self.datapipe,
self.buffer_size,
self._enabled,
self._seed,
self._valid_iterator_id,
self._number_of_samples_yielded,
rng_state,
) = state
self._rng = random.Random()
self._rng.setstate(rng_state)
self._buffer = []
def __del__(self):
self._buffer.clear()
|
pytorch-master
|
torch/utils/data/datapipes/iter/combinatorics.py
|
from typing import Tuple
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
__all__ = ["StreamReaderIterDataPipe", ]
@functional_datapipe('read_from_stream')
class StreamReaderIterDataPipe(IterDataPipe[Tuple[str, bytes]]):
r"""
Given IO streams and their label names, yields bytes with label
name in a tuple (functional name: ``read_from_stream``).
Args:
datapipe: Iterable DataPipe provides label/URL and byte stream
chunk: Number of bytes to be read from stream per iteration.
            If ``None``, all bytes will be read until EOF.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper, StreamReader
>>> from io import StringIO
>>> dp = IterableWrapper([("alphabet", StringIO("abcde"))])
>>> list(StreamReader(dp, chunk=1))
[('alphabet', 'a'), ('alphabet', 'b'), ('alphabet', 'c'), ('alphabet', 'd'), ('alphabet', 'e')]
"""
def __init__(self, datapipe, chunk=None):
self.datapipe = datapipe
self.chunk = chunk
def __iter__(self):
for furl, stream in self.datapipe:
while True:
d = stream.read(self.chunk)
if not d:
stream.close()
break
yield (furl, d)
|
pytorch-master
|
torch/utils/data/datapipes/iter/streamreader.py
|
from io import BufferedIOBase
from typing import Any, Callable, Iterable, Iterator, Sized, Tuple
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import _deprecation_warning
from torch.utils.data.datapipes.utils.decoder import (
Decoder,
basichandlers as decoder_basichandlers,
imagehandler as decoder_imagehandler,
extension_extract_fn
)
__all__ = ["RoutedDecoderIterDataPipe", ]
@functional_datapipe('routed_decode')
class RoutedDecoderIterDataPipe(IterDataPipe[Tuple[str, Any]]):
r"""
Decodes binary streams from input DataPipe, yields pathname and decoded data
in a tuple (functional name: ``routed_decode``).
Args:
datapipe: Iterable datapipe that provides pathname and binary stream in tuples
handlers: Optional user defined decoder handlers. If ``None``, basic and image decoder
handlers will be set as default. If multiple handles are provided, the priority
order follows the order of handlers (the first handler has the top priority)
key_fn: Function for decoder to extract key from pathname to dispatch handlers.
Default is set to extract file extension from pathname
Note:
When ``key_fn`` is specified returning anything other than extension, the default
handler will not work and users need to specify custom handler. Custom handler
could use regex to determine the eligibility to handle data.
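    Example (an illustrative sketch; the files found under ``root`` are hypothetical):
        >>> # xdoctest: +SKIP
        >>> from torch.utils.data.datapipes.iter import FileLister, FileOpener
        >>> dp = FileOpener(FileLister(root="."), mode="b")
        >>> decoded_dp = dp.routed_decode()
        >>> # Each item is a ``(pathname, decoded_data)`` tuple, e.g. a ``.json`` file yields a dict.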
"""
def __init__(self,
datapipe: Iterable[Tuple[str, BufferedIOBase]],
*handlers: Callable,
key_fn: Callable = extension_extract_fn) -> None:
super().__init__()
self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe
if not handlers:
handlers = (decoder_basichandlers, decoder_imagehandler('torch'))
self.decoder = Decoder(*handlers, key_fn=key_fn)
_deprecation_warning(
type(self).__name__,
deprecation_version="1.12",
removal_version="1.13",
old_functional_name="routed_decode",
)
def add_handler(self, *handler: Callable) -> None:
self.decoder.add_handler(*handler)
def __iter__(self) -> Iterator[Tuple[str, Any]]:
for data in self.datapipe:
pathname = data[0]
result = self.decoder(data)
yield (pathname, result[pathname])
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
|
pytorch-master
|
torch/utils/data/datapipes/iter/routeddecoder.py
|
# This file takes partial of the implementation from NVIDIA's webdataset at here:
# https://github.com/tmbdev/webdataset/blob/master/webdataset/autodecode.py
import io
import json
import os.path
import pickle
import tempfile
import torch
from torch.utils.data.datapipes.utils.common import StreamWrapper
__all__ = [
"Decoder",
"ImageHandler",
"MatHandler",
"audiohandler",
"basichandlers",
"extension_extract_fn",
"handle_extension",
"imagehandler",
"mathandler",
"videohandler",
]
################################################################
# handle basic datatypes
################################################################
def basichandlers(extension, data):
if extension in "txt text transcript":
return data.decode("utf-8")
if extension in "cls cls2 class count index inx id".split():
try:
return int(data)
except ValueError:
return None
if extension in "json jsn":
return json.loads(data)
if extension in "pyd pickle".split():
return pickle.loads(data)
if extension in "pt".split():
stream = io.BytesIO(data)
return torch.load(stream)
# if extension in "ten tb".split():
# from . import tenbin
# return tenbin.decode_buffer(data)
# if extension in "mp msgpack msg".split():
# import msgpack
# return msgpack.unpackb(data)
return None
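# A quick illustrative sketch of the dispatch above (comments only, not executed):
#   basichandlers("json", b'{"k": 1}')  ->  {'k': 1}
#   basichandlers("cls", b"7")          ->  7
#   basichandlers("bin", b"\x00")       ->  None  (extension is not handled)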
################################################################
# handle images
################################################################
imagespecs = {
"l8": ("numpy", "uint8", "l"),
"rgb8": ("numpy", "uint8", "rgb"),
"rgba8": ("numpy", "uint8", "rgba"),
"l": ("numpy", "float", "l"),
"rgb": ("numpy", "float", "rgb"),
"rgba": ("numpy", "float", "rgba"),
"torchl8": ("torch", "uint8", "l"),
"torchrgb8": ("torch", "uint8", "rgb"),
"torchrgba8": ("torch", "uint8", "rgba"),
"torchl": ("torch", "float", "l"),
"torchrgb": ("torch", "float", "rgb"),
"torch": ("torch", "float", "rgb"),
"torchrgba": ("torch", "float", "rgba"),
"pill": ("pil", None, "l"),
"pil": ("pil", None, "rgb"),
"pilrgb": ("pil", None, "rgb"),
"pilrgba": ("pil", None, "rgba"),
}
def handle_extension(extensions, f):
"""
Returns a decoder handler function for the list of extensions.
Extensions can be a space separated list of extensions.
Extensions can contain dots, in which case the corresponding number
of extension components must be present in the key given to f.
Comparisons are case insensitive.
Examples:
handle_extension("jpg jpeg", my_decode_jpg) # invoked for any file.jpg
handle_extension("seg.jpg", special_case_jpg) # invoked only for file.seg.jpg
"""
extensions = extensions.lower().split()
def g(key, data):
extension = key.lower().split(".")
for target in extensions:
target = target.split(".")
if len(target) > len(extension):
continue
if extension[-len(target):] == target:
return f(data)
return None
return g
class ImageHandler:
"""
Decode image data using the given `imagespec`.
The `imagespec` specifies whether the image is decoded
    to numpy/torch/pil, decoded to uint8/float, and decoded
to l/rgb/rgba:
- l8: numpy uint8 l
- rgb8: numpy uint8 rgb
- rgba8: numpy uint8 rgba
- l: numpy float l
- rgb: numpy float rgb
- rgba: numpy float rgba
- torchl8: torch uint8 l
- torchrgb8: torch uint8 rgb
- torchrgba8: torch uint8 rgba
- torchl: torch float l
- torchrgb: torch float rgb
- torch: torch float rgb
- torchrgba: torch float rgba
- pill: pil None l
- pil: pil None rgb
- pilrgb: pil None rgb
- pilrgba: pil None rgba
"""
def __init__(self, imagespec):
assert imagespec in list(imagespecs.keys()), "unknown image specification: {}".format(imagespec)
self.imagespec = imagespec.lower()
def __call__(self, extension, data):
if extension.lower() not in "jpg jpeg png ppm pgm pbm pnm".split():
return None
try:
import numpy as np
except ImportError as e:
raise ModuleNotFoundError("Package `numpy` is required to be installed for default image decoder."
"Please use `pip install numpy` to install the package")
try:
import PIL.Image
except ImportError as e:
raise ModuleNotFoundError("Package `PIL` is required to be installed for default image decoder."
"Please use `pip install Pillow` to install the package")
imagespec = self.imagespec
atype, etype, mode = imagespecs[imagespec]
with io.BytesIO(data) as stream:
img = PIL.Image.open(stream)
img.load()
img = img.convert(mode.upper())
if atype == "pil":
return img
elif atype == "numpy":
result = np.asarray(img)
assert result.dtype == np.uint8, "numpy image array should be type uint8, but got {}".format(result.dtype)
if etype == "uint8":
return result
else:
return result.astype("f") / 255.0
elif atype == "torch":
result = np.asarray(img)
assert result.dtype == np.uint8, "numpy image array should be type uint8, but got {}".format(result.dtype)
if etype == "uint8":
result = np.array(result.transpose(2, 0, 1))
return torch.tensor(result)
else:
result = np.array(result.transpose(2, 0, 1))
return torch.tensor(result) / 255.0
return None
def imagehandler(imagespec):
return ImageHandler(imagespec)
################################################################
# torch video
################################################################
def videohandler(extension, data):
if extension not in "mp4 ogv mjpeg avi mov h264 mpg webm wmv".split():
return None
try:
import torchvision.io
except ImportError as e:
raise ModuleNotFoundError("Package `torchvision` is required to be installed for default video file loader."
"Please use `pip install torchvision` or `conda install torchvision -c pytorch`"
"to install the package")
with tempfile.TemporaryDirectory() as dirname:
fname = os.path.join(dirname, f"file.{extension}")
with open(fname, "wb") as stream:
stream.write(data)
return torchvision.io.read_video(fname)
################################################################
# torchaudio
################################################################
def audiohandler(extension, data):
if extension not in ["flac", "mp3", "sox", "wav", "m4a", "ogg", "wma"]:
return None
try:
import torchaudio # type: ignore[import]
except ImportError as e:
raise ModuleNotFoundError("Package `torchaudio` is required to be installed for default audio file loader."
"Please use `pip install torchaudio` or `conda install torchaudio -c pytorch`"
"to install the package")
with tempfile.TemporaryDirectory() as dirname:
fname = os.path.join(dirname, f"file.{extension}")
with open(fname, "wb") as stream:
stream.write(data)
return torchaudio.load(fname)
################################################################
# mat
################################################################
class MatHandler:
def __init__(self, **loadmat_kwargs) -> None:
try:
import scipy.io as sio
except ImportError as e:
raise ModuleNotFoundError("Package `scipy` is required to be installed for mat file."
"Please use `pip install scipy` or `conda install scipy`"
"to install the package")
self.sio = sio
self.loadmat_kwargs = loadmat_kwargs
def __call__(self, extension, data):
if extension != 'mat':
return None
with io.BytesIO(data) as stream:
return self.sio.loadmat(stream, **self.loadmat_kwargs)
def mathandler(**loadmat_kwargs):
return MatHandler(**loadmat_kwargs)
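# A minimal usage sketch (illustrative; ``mat_bytes`` stands for a hypothetical bytes object
# holding the contents of a ``.mat`` file):
#   handler = mathandler(squeeze_me=True)   # keyword arguments are forwarded to scipy.io.loadmat
#   arrays = handler("mat", mat_bytes)      # returns a dict mapping variable names to arrays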
################################################################
# a sample decoder
################################################################
# Extract extension from pathname
def extension_extract_fn(pathname):
ext = os.path.splitext(pathname)[1]
# Remove dot
if ext:
ext = ext[1:]
return ext
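# e.g. extension_extract_fn("dir/sample.tar.gz") -> "gz" (only the last extension component is kept)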
class Decoder:
"""
Decode key/data sets using a list of handlers.
For each key/data item, this iterates through the list of
handlers until some handler returns something other than None.
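    Example (an illustrative sketch):
        decoder = Decoder(basichandlers)
        decoder(("sample.json", b'{"a": 1}'))   # -> {'sample.json': {'a': 1}}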
"""
def __init__(self, *handler, key_fn=extension_extract_fn):
self.handlers = list(handler) if handler else []
self.key_fn = key_fn
    # Insert new handlers at the beginning of the handlers list so that the newest
    # handler has the highest priority
def add_handler(self, *handler):
if not handler:
return
self.handlers = list(handler) + self.handlers
@staticmethod
def _is_stream_handle(data):
obj_to_check = data.file_obj if isinstance(data, StreamWrapper) else data
return isinstance(obj_to_check, io.BufferedIOBase) or isinstance(obj_to_check, io.RawIOBase)
def decode1(self, key, data):
if not data:
return data
# if data is a stream handle, we need to read all the content before decoding
if Decoder._is_stream_handle(data):
ds = data
# The behavior of .read can differ between streams (e.g. HTTPResponse), hence this is used instead
data = b"".join(data)
ds.close()
for f in self.handlers:
result = f(key, data)
if result is not None:
return result
return data
def decode(self, data):
result = {}
# single data tuple(pathname, data stream)
if isinstance(data, tuple):
data = [data]
if data is not None:
for k, v in data:
                # TODO: xinyu, figure out why Nvidia does this?
if k[0] == "_":
if isinstance(v, bytes):
v = v.decode("utf-8")
result[k] = v
continue
result[k] = self.decode1(self.key_fn(k), v)
return result
def __call__(self, data):
return self.decode(data)
|
pytorch-master
|
torch/utils/data/datapipes/utils/decoder.py
|
pytorch-master
|
torch/utils/data/datapipes/utils/__init__.py
|
|
import fnmatch
import inspect
import os
import warnings
from io import IOBase
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
from torch.utils.data._utils.serialization import DILL_AVAILABLE
__all__ = [
"StreamWrapper",
"get_file_binaries_from_pathnames",
"get_file_pathnames_from_root",
"match_masks",
"validate_pathname_binary_tuple",
]
def _is_local_fn(fn):
# Functions or Methods
if hasattr(fn, "__code__"):
return fn.__code__.co_flags & inspect.CO_NESTED
# Callable Objects
else:
if hasattr(fn, "__qualname__"):
return "<locals>" in fn.__qualname__
fn_type = type(fn)
if hasattr(fn_type, "__qualname__"):
return "<locals>" in fn_type.__qualname__
return False
def _check_unpickable_fn(fn: Callable):
"""
    Checks whether the given function is picklable. If it is a lambda or local function, a UserWarning
    is raised; if it is not callable at all, a TypeError is raised.
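    Example (an illustrative sketch):
        _check_unpickable_fn(len)            # a regular, picklable callable: passes silently
        _check_unpickable_fn(lambda x: x)    # emits a UserWarning unless ``dill`` is available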
"""
if not callable(fn):
raise TypeError(f"A callable function is expected, but {type(fn)} is provided.")
# Extract function from partial object
# Nested partial function is automatically expanded as a single partial object
if isinstance(fn, partial):
fn = fn.func
# Local function
if _is_local_fn(fn) and not DILL_AVAILABLE:
warnings.warn(
"Local function is not supported by pickle, please use "
"regular python function or functools.partial instead."
)
return
# Lambda function
if hasattr(fn, "__name__") and fn.__name__ == "<lambda>" and not DILL_AVAILABLE:
warnings.warn(
"Lambda function is not supported by pickle, please use "
"regular python function or functools.partial instead."
)
return
def match_masks(name : str, masks : Union[str, List[str]]) -> bool:
# empty mask matches any input name
if not masks:
return True
if isinstance(masks, str):
return fnmatch.fnmatch(name, masks)
for mask in masks:
if fnmatch.fnmatch(name, mask):
return True
return False
def get_file_pathnames_from_root(
root: str,
masks: Union[str, List[str]],
recursive: bool = False,
abspath: bool = False,
non_deterministic: bool = False) -> Iterable[str]:
# print out an error message and raise the error out
def onerror(err : OSError):
warnings.warn(err.filename + " : " + err.strerror)
raise err
if os.path.isfile(root):
path = root
if abspath:
path = os.path.abspath(path)
fname = os.path.basename(path)
if match_masks(fname, masks):
yield path
else:
for path, dirs, files in os.walk(root, onerror=onerror):
if abspath:
path = os.path.abspath(path)
if not non_deterministic:
files.sort()
for f in files:
if match_masks(f, masks):
yield os.path.join(path, f)
if not recursive:
break
if not non_deterministic:
                # Note that this modifies the internal list from `os.walk` in place.
                # This only works because `os.walk` yields `dirs` by reference (without a shallow copy),
                # so sorting it here controls which subdirectories are traversed and in what order.
# https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/os.py#L407
dirs.sort()
def get_file_binaries_from_pathnames(pathnames: Iterable, mode: str, encoding: Optional[str] = None):
if not isinstance(pathnames, Iterable):
pathnames = [pathnames, ]
if mode in ('b', 't'):
mode = 'r' + mode
for pathname in pathnames:
if not isinstance(pathname, str):
raise TypeError("Expected string type for pathname, but got {}"
.format(type(pathname)))
yield pathname, StreamWrapper(open(pathname, mode, encoding=encoding))
def validate_pathname_binary_tuple(data: Tuple[str, IOBase]):
if not isinstance(data, tuple):
raise TypeError(f"pathname binary data should be tuple type, but it is type {type(data)}")
if len(data) != 2:
raise TypeError(f"pathname binary stream tuple length should be 2, but got {len(data)}")
if not isinstance(data[0], str):
raise TypeError(f"pathname within the tuple should have string type pathname, but it is type {type(data[0])}")
if not isinstance(data[1], IOBase) and not isinstance(data[1], StreamWrapper):
raise TypeError(
f"binary stream within the tuple should have IOBase or"
f"its subclasses as type, but it is type {type(data[1])}"
)
# Deprecated function names and its corresponding DataPipe type and kwargs for the `_deprecation_warning` function
_iter_deprecated_functional_names: Dict[str, Dict] = {"open_file_by_fsspec":
{"old_class_name": "FSSpecFileOpener",
"deprecation_version": "0.4.0",
"removal_version": "0.6.0",
"old_functional_name": "open_file_by_fsspec",
"new_functional_name": "open_files_by_fsspec",
"deprecate_functional_name_only": True},
"open_file_by_iopath":
{"old_class_name": "IoPathFileOpener",
"deprecation_version": "0.4.0",
"removal_version": "0.6.0",
"old_functional_name": "open_file_by_iopath",
"new_functional_name": "open_files_by_iopath",
"deprecate_functional_name_only": True}}
_map_deprecated_functional_names: Dict[str, Dict] = {}
def _deprecation_warning(
old_class_name: str,
*,
deprecation_version: str,
removal_version: str,
old_functional_name: str = "",
old_argument_name: str = "",
new_class_name: str = "",
new_functional_name: str = "",
new_argument_name: str = "",
deprecate_functional_name_only: bool = False,
) -> None:
if new_functional_name and not old_functional_name:
raise ValueError("Old functional API needs to be specified for the deprecation warning.")
if new_argument_name and not old_argument_name:
raise ValueError("Old argument name needs to be specified for the deprecation warning.")
if old_functional_name and old_argument_name:
raise ValueError("Deprecating warning for functional API and argument should be separated.")
msg = f"`{old_class_name}()`"
if deprecate_functional_name_only and old_functional_name:
msg = f"{msg}'s functional API `.{old_functional_name}()` is"
elif old_functional_name:
msg = f"{msg} and its functional API `.{old_functional_name}()` are"
elif old_argument_name:
msg = f"The argument `{old_argument_name}` of {msg} is"
else:
msg = f"{msg} is"
msg = (
f"{msg} deprecated since {deprecation_version} and will be removed in {removal_version}."
f"\nSee https://github.com/pytorch/data/issues/163 for details."
)
if new_class_name or new_functional_name:
msg = f"{msg}\nPlease use"
if new_class_name:
msg = f"{msg} `{new_class_name}()`"
if new_class_name and new_functional_name:
msg = f"{msg} or"
if new_functional_name:
msg = f"{msg} `.{new_functional_name}()`"
msg = f"{msg} instead."
if new_argument_name:
msg = f"{msg}\nPlease use `{old_class_name}({new_argument_name}=)` instead."
warnings.warn(msg, FutureWarning)
class StreamWrapper:
"""
    StreamWrapper wraps a file handle generated by a DataPipe operation such as `FileOpener`.
    It guarantees that the wrapped file handle is closed when the wrapper goes out of scope.
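    Example (an illustrative sketch; ``data.bin`` is a placeholder path):
        wrapped = StreamWrapper(open("data.bin", "rb"))
        chunk = wrapped.read(1024)   # attribute access is forwarded to the wrapped file object
        wrapped.close()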
"""
session_streams: Dict[Any, int] = {}
debug_unclosed_streams: bool = False
def __init__(self, file_obj, parent_stream=None, name=None):
self.file_obj = file_obj
self.child_counter = 0
self.parent_stream = parent_stream
self.close_on_last_child = False
self.name = name
self.closed = False
if parent_stream is not None:
if not isinstance(parent_stream, StreamWrapper):
raise RuntimeError('Parent stream should be StreamWrapper, {} was given'.format(type(parent_stream)))
parent_stream.child_counter += 1
self.parent_stream = parent_stream
if StreamWrapper.debug_unclosed_streams:
StreamWrapper.session_streams[self] = 1
@classmethod
def close_streams(cls, v, depth=0):
"""
        Traverses the structure and attempts to close all found StreamWrappers on a best-effort basis.
"""
if depth > 10:
return
if isinstance(v, StreamWrapper):
v.close()
else:
# Traverse only simple structures
if isinstance(v, dict):
for kk, vv in v.items():
cls.close_streams(vv, depth=depth + 1)
elif isinstance(v, list) or isinstance(v, tuple):
for vv in v:
cls.close_streams(vv, depth=depth + 1)
def __getattr__(self, name):
file_obj = self.__dict__['file_obj']
return getattr(file_obj, name)
def close(self, *args, **kwargs):
if StreamWrapper.debug_unclosed_streams:
del StreamWrapper.session_streams[self]
if hasattr(self, "parent_stream") and self.parent_stream is not None:
self.parent_stream.child_counter -= 1
if not self.parent_stream.child_counter and self.parent_stream.close_on_last_child:
self.parent_stream.close()
try:
self.file_obj.close(*args, **kwargs)
except AttributeError:
pass
self.closed = True
def autoclose(self):
"""
        Close the stream if it has no children, or mark it to be automatically closed as soon as
        all child streams are closed.
"""
if self.child_counter == 0:
self.close()
self.close_on_last_child = True
def __dir__(self):
attrs = list(self.__dict__.keys()) + list(StreamWrapper.__dict__.keys())
attrs += dir(self.file_obj)
return list(set(list(attrs)))
def __del__(self):
if not self.closed:
self.close()
def __iter__(self):
for line in self.file_obj:
yield line
def __next__(self):
return next(self.file_obj)
def __repr__(self):
if self.name is None:
return f"StreamWrapper<{self.file_obj!r}>"
else:
return f"StreamWrapper<{self.name},{self.file_obj!r}>"
def __getstate__(self):
return self.file_obj
def __setstate__(self, obj):
self.file_obj = obj
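# Minimal usage sketch (hypothetical, not part of the upstream module): wrap raw file
# objects so they are closed deterministically, and chain a child stream so that
# `autoclose()` on the parent defers closing until the last child is closed.
def _example_stream_wrapper_usage() -> None:
    import io

    parent = StreamWrapper(io.BytesIO(b"payload"), name="parent")
    child = StreamWrapper(io.BytesIO(b"derived"), parent_stream=parent, name="child")
    parent.autoclose()             # parent still has a live child, so it only arms close_on_last_child
    assert not parent.closed
    child.close()                  # closing the last child now closes the parent as well
    assert parent.closed and child.closed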
|
pytorch-master
|
torch/utils/data/datapipes/utils/common.py
|
from torch.utils.data.datapipes._hook_iterator import _SnapshotState
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.graph_settings import apply_shuffle_seed
# TODO: Caveats
# 1. Caller (either the ReadingService or DataLoader) must pass in the initial RNG
# 2. `in_batch_shuffle` and `bucketbatch` are not compatible with this because they currently
# lack the option to `set_seed`.
def _simple_graph_snapshot_restoration(datapipe: IterDataPipe, n_iterations: int, rng=None) -> None:
r"""
This function will restore a snapshot by fast-forwarding the given DataPipe by ``n_iterations``,
and in the process, fast-forward its parent DataPipes as well at the cost of re-doing every computation.
    For instance, applying this function to the final DataPipe of a graph will restore the snapshot
    (via fast-forward) of every DataPipe within the graph.
After you deserialize a DataPipe, you can use its `_number_of_samples_yielded` attribute as the input
to this function to forward the DataPipe.
A DataPipe cannot be restored twice in a row unless there is an iteration started between the restoration
attempts.
Note:
        This is the simplest but least efficient way to fast-forward a DataPipe. Usage of other fast-forwarding
        methods (custom ones if necessary) is recommended.
Args:
datapipe: IterDataPipe to be fast-forwarded
n_iterations: number of iterations to fast-forward
rng: ``Optional[torch.Generator]``. If not ``None``, this RNG will be used for shuffling. The generator
should be in its `initial` state as it was first passed into ``DataLoader`` or ``ReadingService``.
"""
if datapipe._snapshot_state == _SnapshotState.Restored:
raise RuntimeError(
"Snapshot restoration cannot be applied. You can only restore simple snapshot to the graph "
"if your graph has not been restored.")
# For this snapshot restoration function, we want the DataPipe to be at its initial state prior to
# simple fast-forwarding. Therefore, we need to call `reset` twice, because if `SnapshotState` is `Restored`,
# the first reset will not actually reset.
datapipe.reset() # This ensures `SnapshotState` is `Iterating` by this point, even if it was `Restored`.
apply_shuffle_seed(datapipe, rng)
remainder = n_iterations
    it = iter(datapipe)  # This always resets the DataPipe if it hasn't already been reset.
while remainder > 0:
try:
next(it)
remainder -= 1
except StopIteration:
raise RuntimeError(f"Fast-forward {datapipe} by {n_iterations} iterations "
"exceeds the number of samples available.")
datapipe._fast_forward_iterator = it
    # While the DataPipe has `_fast_forward_iterator`, `next()` will get its results from there instead of elsewhere.
    # This prevents the DataPipe from resetting in the `iter()` call.
    # If another DataPipe is consuming it, it won't have to start over again.
datapipe._snapshot_state = _SnapshotState.Restored
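# Hedged usage sketch (hypothetical, not part of the upstream module): fast-forward a
# freshly re-created DataPipe by the number of samples the original graph had already
# yielded before a checkpoint. `IterableWrapper` is the built-in IterDataPipe, used here
# purely for illustration.
def _example_simple_snapshot_restoration() -> None:
    from torch.utils.data.datapipes.iter import IterableWrapper

    original_dp = IterableWrapper(range(10))
    it = iter(original_dp)
    consumed = [next(it) for _ in range(3)]            # pretend 3 samples were yielded before checkpointing
    restored_dp = IterableWrapper(range(10))           # stand-in for a deserialized copy of the graph
    _simple_graph_snapshot_restoration(restored_dp, n_iterations=len(consumed))
    remaining = list(restored_dp)                      # resumes where the snapshot left off: [3, 4, ..., 9]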
|
pytorch-master
|
torch/utils/data/datapipes/utils/snapshot.py
|
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn
from typing import Callable, TypeVar
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import MapDataPipe
__all__ = ["MapperMapDataPipe", "default_fn"]
T_co = TypeVar('T_co', covariant=True)
# Default function to return each item directly
# In order to keep the DataPipe picklable, this avoids the use
# of a Python lambda function
def default_fn(data):
return data
@functional_datapipe('map')
class MapperMapDataPipe(MapDataPipe[T_co]):
r"""
Apply the input function over each item from the source DataPipe (functional name: ``map``).
The function can be any regular Python function or partial object. Lambda
function is not recommended as it is not supported by pickle.
Args:
datapipe: Source MapDataPipe
fn: Function being applied to each item
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper, Mapper
>>> def add_one(x):
... return x + 1
>>> dp = SequenceWrapper(range(10))
>>> map_dp_1 = dp.map(add_one)
>>> list(map_dp_1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> map_dp_2 = Mapper(dp, lambda x: x + 1)
>>> list(map_dp_2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
datapipe: MapDataPipe
fn: Callable
def __init__(
self,
datapipe: MapDataPipe,
fn: Callable = default_fn,
) -> None:
super().__init__()
self.datapipe = datapipe
_check_unpickable_fn(fn)
self.fn = fn # type: ignore[assignment]
def __len__(self) -> int:
return len(self.datapipe)
def __getitem__(self, index) -> T_co:
return self.fn(self.datapipe[index])
|
pytorch-master
|
torch/utils/data/datapipes/map/callable.py
|
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import MapDataPipe, DataChunk
from typing import List, Optional, Sized, TypeVar
__all__ = ["BatcherMapDataPipe", ]
T = TypeVar('T')
@functional_datapipe('batch')
class BatcherMapDataPipe(MapDataPipe[DataChunk]):
r"""
Create mini-batches of data (functional name: ``batch``). An outer dimension will be added as
``batch_size`` if ``drop_last`` is set to ``True``, or ``length % batch_size`` for the
last batch if ``drop_last`` is set to ``False``.
Args:
datapipe: Iterable DataPipe being batched
batch_size: The size of each batch
drop_last: Option to drop the last batch if it's not full
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp = SequenceWrapper(range(10))
>>> batch_dp = dp.batch(batch_size=2)
>>> list(batch_dp)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
"""
datapipe: MapDataPipe
batch_size: int
drop_last: bool
length: Optional[int]
def __init__(self,
datapipe: MapDataPipe[T],
batch_size: int,
drop_last: bool = False,
wrapper_class=DataChunk,
) -> None:
assert batch_size > 0, "Batch size is required to be larger than 0!"
super().__init__()
self.datapipe = datapipe
self.batch_size = batch_size
self.drop_last = drop_last
self.length = None
self.wrapper_class = wrapper_class
def __getitem__(self, index) -> DataChunk:
batch: List = []
indices = range(index * self.batch_size, (index + 1) * self.batch_size)
try:
for i in indices:
batch.append(self.datapipe[i])
return self.wrapper_class(batch)
except IndexError:
if not self.drop_last and len(batch) > 0:
return self.wrapper_class(batch)
else:
                raise IndexError(f"Index {index} is out of bounds.")
def __len__(self) -> int:
if self.length is not None:
return self.length
if isinstance(self.datapipe, Sized):
if self.drop_last:
self.length = len(self.datapipe) // self.batch_size
else:
self.length = (len(self.datapipe) + self.batch_size - 1) // self.batch_size
return self.length
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
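# Small illustrative sketch (hypothetical, not part of the upstream module): with 5
# source items and batch_size=2, `drop_last` decides whether the trailing partial
# batch is kept, which also changes the reported length.
def _example_batcher_drop_last() -> None:
    from torch.utils.data.datapipes.map import SequenceWrapper

    dp = SequenceWrapper(range(5))
    keep_partial = BatcherMapDataPipe(dp, batch_size=2)                  # [[0, 1], [2, 3], [4]]
    drop_partial = BatcherMapDataPipe(dp, batch_size=2, drop_last=True)  # [[0, 1], [2, 3]]
    assert len(keep_partial) == 3 and len(drop_partial) == 2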
|
pytorch-master
|
torch/utils/data/datapipes/map/grouping.py
|
# Functional DataPipe
from torch.utils.data.datapipes.map.callable import MapperMapDataPipe as Mapper
from torch.utils.data.datapipes.map.combinatorics import ShufflerMapDataPipe as Shuffler
from torch.utils.data.datapipes.map.combining import (
ConcaterMapDataPipe as Concater,
ZipperMapDataPipe as Zipper
)
from torch.utils.data.datapipes.map.grouping import (
BatcherMapDataPipe as Batcher
)
from torch.utils.data.datapipes.map.utils import SequenceWrapperMapDataPipe as SequenceWrapper
__all__ = ['Batcher', 'Concater', 'Mapper', 'SequenceWrapper', 'Shuffler', 'Zipper']
# Please keep this list sorted
assert __all__ == sorted(__all__)
|
pytorch-master
|
torch/utils/data/datapipes/map/__init__.py
|
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import MapDataPipe
from typing import Sized, Tuple, TypeVar
__all__ = ["ConcaterMapDataPipe", "ZipperMapDataPipe"]
T_co = TypeVar('T_co', covariant=True)
@functional_datapipe('concat')
class ConcaterMapDataPipe(MapDataPipe):
r"""
Concatenate multiple Map DataPipes (functional name: ``concat``).
    The new index follows the cumulative sum of the lengths of the source DataPipes.
    For example, if there are 2 source DataPipes both with length 5,
    indices 0 to 4 of the resulting `ConcaterMapDataPipe` would refer to
elements of the first DataPipe, and 5 to 9 would refer to elements
of the second DataPipe.
Args:
datapipes: Map DataPipes being concatenated
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp1 = SequenceWrapper(range(3))
>>> dp2 = SequenceWrapper(range(3))
>>> concat_dp = dp1.concat(dp2)
>>> list(concat_dp)
[0, 1, 2, 0, 1, 2]
"""
datapipes: Tuple[MapDataPipe]
length: int
def __init__(self, *datapipes: MapDataPipe):
if len(datapipes) == 0:
raise ValueError("Expected at least one DataPipe, but got nothing")
if not all(isinstance(dp, MapDataPipe) for dp in datapipes):
raise TypeError("Expected all inputs to be `MapDataPipe`")
if not all(isinstance(dp, Sized) for dp in datapipes):
raise TypeError("Expected all inputs to be `Sized`")
self.datapipes = datapipes # type: ignore[assignment]
self.length = -1
def __getitem__(self, index) -> T_co:
offset = 0
for dp in self.datapipes:
if index - offset < len(dp):
return dp[index - offset]
else:
offset += len(dp)
raise IndexError("Index {} is out of range.".format(index))
def __len__(self) -> int:
if self.length == -1:
self.length = sum(len(dp) for dp in self.datapipes)
return self.length
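# Brief sketch (hypothetical, not part of the upstream module): indices past the end
# of the first DataPipe fall through to the next one, offset by the preceding lengths.
def _example_concat_indexing() -> None:
    from torch.utils.data.datapipes.map import SequenceWrapper

    dp1, dp2 = SequenceWrapper([10, 11, 12]), SequenceWrapper([20, 21])
    concat_dp = ConcaterMapDataPipe(dp1, dp2)
    assert concat_dp[1] == 11      # served by dp1
    assert concat_dp[3] == 20      # 3 - len(dp1) == 0, served by dp2
    assert len(concat_dp) == 5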
@functional_datapipe('zip')
class ZipperMapDataPipe(MapDataPipe[Tuple[T_co, ...]]):
r"""
Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).
    This MapDataPipe is out of bounds as soon as the shortest input DataPipe is exhausted.
Args:
*datapipes: Map DataPipes being aggregated
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp1 = SequenceWrapper(range(3))
>>> dp2 = SequenceWrapper(range(10, 13))
>>> zip_dp = dp1.zip(dp2)
>>> list(zip_dp)
[(0, 10), (1, 11), (2, 12)]
"""
datapipes: Tuple[MapDataPipe[T_co], ...]
length: int
def __init__(self, *datapipes: MapDataPipe[T_co]) -> None:
if len(datapipes) == 0:
raise ValueError("Expected at least one DataPipe, but got nothing")
if not all(isinstance(dp, MapDataPipe) for dp in datapipes):
raise TypeError("Expected all inputs to be `MapDataPipe`")
if not all(isinstance(dp, Sized) for dp in datapipes):
raise TypeError("Expected all inputs to be `Sized`")
self.datapipes = datapipes
self.length = -1
def __getitem__(self, index) -> Tuple[T_co, ...]:
res = []
for dp in self.datapipes:
try:
res.append(dp[index])
except IndexError:
raise IndexError(f"Index {index} is out of range for one of the input MapDataPipes {dp}.")
return tuple(res)
def __len__(self) -> int:
if self.length == -1:
self.length = min(len(dp) for dp in self.datapipes)
return self.length
|
pytorch-master
|
torch/utils/data/datapipes/map/combining.py
|
import copy
import warnings
from torch.utils.data.datapipes.datapipe import MapDataPipe
__all__ = ["SequenceWrapperMapDataPipe", ]
class SequenceWrapperMapDataPipe(MapDataPipe):
r"""
Wraps a sequence object into a MapDataPipe.
Args:
sequence: Sequence object to be wrapped into an MapDataPipe
deepcopy: Option to deepcopy input sequence object
.. note::
If ``deepcopy`` is set to False explicitly, users should ensure
        that the data pipeline doesn't contain any in-place operations over
the iterable instance, in order to prevent data inconsistency
across iterations.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp = SequenceWrapper(range(10))
>>> list(dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> dp = SequenceWrapper({'a': 100, 'b': 200, 'c': 300, 'd': 400})
>>> dp['a']
100
"""
def __init__(self, sequence, deepcopy=True):
if deepcopy:
try:
self.sequence = copy.deepcopy(sequence)
except TypeError:
                warnings.warn(
                    "The input sequence can not be deepcopied, "
                    "please be aware that in-place modifications would affect the source data"
)
self.sequence = sequence
else:
self.sequence = sequence
def __getitem__(self, index):
return self.sequence[index]
def __len__(self):
return len(self.sequence)
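# Short sketch (hypothetical, not part of the upstream module): with the default
# ``deepcopy=True`` later mutations of the source object are not visible through the
# DataPipe, whereas ``deepcopy=False`` shares the underlying sequence.
def _example_sequence_wrapper_deepcopy() -> None:
    source = [0, 1, 2]
    snapshot_dp = SequenceWrapperMapDataPipe(source)                  # deep-copied at construction
    shared_dp = SequenceWrapperMapDataPipe(source, deepcopy=False)    # shares `source`
    source[0] = 99
    assert snapshot_dp[0] == 0 and shared_dp[0] == 99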
|
pytorch-master
|
torch/utils/data/datapipes/map/utils.py
|
import random
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import MapDataPipe
from typing import Iterator, List, Optional, TypeVar
__all__ = ["ShufflerMapDataPipe", ]
T_co = TypeVar('T_co', covariant=True)
@functional_datapipe('shuffle')
class ShufflerMapDataPipe(MapDataPipe[T_co]):
r"""
Shuffle the input DataPipe via its indices (functional name: ``shuffle``).
When it is used with :class:`~torch.utils.data.DataLoader`, the methods to
set up random seed are different based on :attr:`num_workers`.
For single-process mode (:attr:`num_workers == 0`), the random seed is set before
the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
    mode (:attr:`num_workers > 0`), ``worker_init_fn`` is used to set up a random seed
for each worker process.
Args:
datapipe: MapDataPipe being shuffled
indices: a list of indices of the MapDataPipe. If not provided, we assume it uses 0-based indexing
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp = SequenceWrapper(range(10))
>>> shuffle_dp = dp.shuffle()
>>> list(shuffle_dp)
[0, 4, 1, 6, 3, 2, 9, 5, 7, 8]
"""
datapipe: MapDataPipe[T_co]
def __init__(self,
datapipe: MapDataPipe[T_co],
*,
indices: Optional[List] = None,
) -> None:
super().__init__()
self.datapipe = datapipe
self.indices = list(range(len(datapipe))) if indices is None else indices
self.index_map = {index_name: num_index for num_index, index_name in enumerate(self.indices)}
# We do not lazily shuffle because this way is significantly faster in terms of total time
random.shuffle(self.indices)
def __getitem__(self, index) -> T_co:
try:
old_numeric_index = self.index_map[index]
except KeyError:
raise IndexError(f"Index {index} is out of range for {self}.")
new_index = self.indices[old_numeric_index]
return self.datapipe[new_index]
# Without __iter__ implemented, by default it tries to use 0-index,
# which doesn't work when there is a custom index.
def __iter__(self) -> Iterator[T_co]:
for i in self.indices:
yield self.datapipe[i]
def __len__(self) -> int:
return len(self.datapipe)
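# Hedged sketch (hypothetical, not part of the upstream module): when the source
# MapDataPipe is keyed by non-integer indices, pass them via ``indices`` so the
# shuffler knows the full key set; lookups then go through the shuffled index map.
def _example_shuffler_with_custom_indices() -> None:
    from torch.utils.data.datapipes.map import SequenceWrapper

    dp = SequenceWrapper({'a': 1, 'b': 2, 'c': 3})
    shuffle_dp = ShufflerMapDataPipe(dp, indices=['a', 'b', 'c'])
    assert sorted(shuffle_dp) == [1, 2, 3]     # same values, shuffled order
    assert shuffle_dp['a'] in (1, 2, 3)        # key lookup goes through the shuffled mapping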
|
pytorch-master
|
torch/utils/data/datapipes/map/combinatorics.py
|
pytorch-master
|
torch/contrib/__init__.py
|
|
import time
from collections import defaultdict
from functools import partial
from typing import DefaultDict
import torch
# Unfortunately it doesn't seem as if there was any way to get TensorBoard to do
# anything without having TF installed, and so this file has a hard dependency on it
# as well. It really is a debugging tool, so it doesn't matter.
try:
from tensorflow.core.util import event_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.summary.writer.writer import FileWriter
except ImportError:
raise ImportError("TensorBoard visualization of GraphExecutors requires having "
"TensorFlow installed") from None
def dump_tensorboard_summary(graph_executor, logdir):
with FileWriter(logdir) as w:
pb_graph = visualize(graph_executor)
evt = event_pb2.Event(wall_time=time.time(), graph_def=pb_graph.SerializeToString())
w.add_event(evt)
def visualize(graph, name_prefix='', pb_graph=None, executors_it=None):
"""Visualizes an independent graph, or a graph executor."""
value_map = {}
pb_graph = pb_graph or graph_pb2.GraphDef()
if isinstance(graph, torch._C.GraphExecutorState):
visualize_graph_executor(graph, name_prefix, pb_graph,
partial(visualize, pb_graph=pb_graph))
return pb_graph
# Set up an input node
input_node = pb_graph.node.add(op='input', name=name_prefix + 'input')
for i, value in enumerate(graph.param_node().outputs()):
value_map[value.unique()] = name_prefix + 'input:' + str(i)
visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it)
# Gather all outputs
return_node = pb_graph.node.add(op='output', name=name_prefix + 'output')
for value in graph.return_node().inputs():
return_node.input.append(value_map[value.unique()])
return pb_graph
def visualize_graph_executor(state, name_prefix, pb_graph, inline_graph):
"""Appends the state of a given GraphExecutor to the graph protobuf.
Args:
state (GraphExecutor or GraphExecutorState): GraphExecutor to display.
name_prefix (str): Name prefix of the containing subgraph.
pb_graph (GraphDef): graph to append to.
inline_graph (Callable): a function that handles setting up a value_map,
so that some graphs in here can be inlined. This is necessary, because
this will simply be `visualize` for the top-level GraphExecutor,
or `inline_graph` for all nested ones.
The signature should look like (Graph, name_prefix) -> ().
It will be called exactly once.
The strategy is to embed all different configurations as independent subgraphs,
while inlining the original graph as the one that actually produces the values.
"""
if state.autograd_fallback_graph is not None:
visualize(graph=state.autograd_fallback_graph,
name_prefix=name_prefix + 'autograd_fallback/',
pb_graph=pb_graph,
executors_it=iter(state.autograd_fallback.executors()))
for i, (arg_spec, plan) in enumerate(state.execution_plans.items()):
subgraph_name = name_prefix + 'plan{}/'.format(i)
# Create a disconnected node that will keep information regarding the input
# types of this trace. This is unfortunately a bit too verbose to be included
# in the subgraph name.
input_kinds = pb_graph.node.add(op='INPUT_KIND', name=subgraph_name)
input_kinds.attr['inputs'].s = repr(arg_spec).encode('ascii')
visualize(plan.graph, subgraph_name, pb_graph, iter(plan.code.executors()))
# Show gradient as an independent subgraph of this plan
if plan.grad_executor is not None:
grad_subgraph_name = subgraph_name + 'grad/'
visualize(plan.grad_executor, grad_subgraph_name, pb_graph)
return inline_graph(state.graph, name_prefix + 'original/')
def visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it=None):
"""Recursive part of visualize (basically skips setting up the input and output nodes)."""
def inline_graph(subgraph, name, node):
rec_value_map = {inp.unique(): value_map[val.unique()]
for inp, val in zip(subgraph.inputs(), node.inputs())}
visualize_rec(graph=subgraph,
value_map=rec_value_map,
name_prefix=name,
pb_graph=pb_graph)
for out, val in zip(subgraph.outputs(), node.outputs()):
value_map[val.unique()] = rec_value_map[out.unique()]
op_id_counter: DefaultDict[str, int] = defaultdict(int)
def name_for(node):
kind = node.kind()[node.kind().index('::') + 2:]
op_id_counter[kind] += 1
return kind, name_prefix + kind + '_' + str(op_id_counter[kind])
def add_fusion_group(node):
op, name = name_for(node)
inline_graph(node.g('Subgraph'), name + '/', node)
def add_graph_executor(node):
op, name = name_for(node)
if executors_it is None:
add_node(node)
else:
ge = next(executors_it)
visualize_graph_executor(ge, name + '/', pb_graph,
partial(inline_graph, node=node))
def add_node(node):
if node.kind() == 'prim::FusionGroup':
return add_fusion_group(node)
elif node.kind() == 'prim::GraphExecutor':
return add_graph_executor(node)
op, name = name_for(node)
pb_node = pb_graph.node.add(op=op, name=name)
for value in node.inputs():
pb_node.input.append(value_map[value.unique()])
# TODO: handle attrs
for i, value in enumerate(node.outputs()):
value_map[value.unique()] = name + ':' + str(i)
for node in graph.nodes():
add_node(node)
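# Hedged usage sketch (hypothetical; requires TensorFlow and is not part of the upstream
# module). A standalone TorchScript IR graph can be rendered into a GraphDef with
# `visualize`; the `get_debug_state()` call and the log directory mentioned below are
# assumptions that may differ across PyTorch versions.
def _example_visualize_scripted_function() -> None:
    @torch.jit.script
    def f(x):
        return x * 2 + 1

    pb_graph = visualize(f.graph)               # independent IR graph -> GraphDef
    print(len(pb_graph.node), "nodes emitted")
    # For a full GraphExecutor, one would instead dump its state, e.g.:
    #     dump_tensorboard_summary(f.get_debug_state(), "/tmp/graph_executor_logdir")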
|
pytorch-master
|
torch/contrib/_tensorboard_vis.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/observer.py`, while adding an import statement
here.
"""
from torch.ao.quantization.observer import (
_PartialWrapper,
_with_args,
_with_callable_args,
ABC,
ObserverBase,
_ObserverBase,
MinMaxObserver,
MovingAverageMinMaxObserver,
PerChannelMinMaxObserver,
MovingAveragePerChannelMinMaxObserver,
HistogramObserver,
PlaceholderObserver,
RecordingObserver,
NoopObserver,
_is_activation_post_process,
_is_per_channel_script_obs_instance,
get_observer_state_dict,
load_observer_state_dict,
default_observer,
default_placeholder_observer,
default_debug_observer,
default_weight_observer,
default_histogram_observer,
default_per_channel_weight_observer,
default_dynamic_quant_observer,
default_float_qparams_observer,
)
|
pytorch-master
|
torch/quantization/observer.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/fuse_modules.py`, while adding an import statement
here.
"""
from torch.ao.quantization.fuse_modules import fuse_modules
from torch.ao.quantization.fuse_modules import fuse_known_modules
from torch.ao.quantization.fuse_modules import get_fuser_method
# for backward compatibility
from torch.quantization.fuser_method_mappings import fuse_conv_bn
from torch.quantization.fuser_method_mappings import fuse_conv_bn_relu
# TODO: These functions are not used outside the `fuse_modules.py`
# Keeping here for now, need to remove them later.
from torch.ao.quantization.fuse_modules import (
_fuse_modules,
_get_module,
_set_module,
)
|
pytorch-master
|
torch/quantization/fuse_modules.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/quantization_mappings.py`, while adding an import statement
here.
"""
from torch.ao.quantization.quantization_mappings import (
DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS,
DEFAULT_STATIC_QUANT_MODULE_MAPPINGS,
DEFAULT_QAT_MODULE_MAPPINGS,
DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS,
_INCLUDE_QCONFIG_PROPAGATE_LIST,
DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS,
DEFAULT_MODULE_TO_ACT_POST_PROCESS,
no_observer_set,
get_default_static_quant_module_mappings,
get_static_quant_module_class,
get_dynamic_quant_module_class,
get_default_qat_module_mappings,
get_default_dynamic_quant_module_mappings,
get_default_qconfig_propagation_list,
get_default_compare_output_module_list,
get_default_float_to_quantized_operator_mappings,
get_quantized_operator,
_get_special_act_post_process,
_has_special_act_post_process,
)
|
pytorch-master
|
torch/quantization/quantization_mappings.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/quantize.py`, while adding an import statement
here.
"""
from torch.ao.quantization.quantize import _convert
from torch.ao.quantization.quantize import _observer_forward_hook
from torch.ao.quantization.quantize import _propagate_qconfig_helper
from torch.ao.quantization.quantize import _remove_activation_post_process
from torch.ao.quantization.quantize import _remove_qconfig
from torch.ao.quantization.quantize import add_observer_
from torch.ao.quantization.quantize import add_quant_dequant
from torch.ao.quantization.quantize import convert
from torch.ao.quantization.quantize import get_observer_dict
from torch.ao.quantization.quantize import get_unique_devices_
from torch.ao.quantization.quantize import is_activation_post_process
from torch.ao.quantization.quantize import prepare
from torch.ao.quantization.quantize import prepare_qat
from torch.ao.quantization.quantize import propagate_qconfig_
from torch.ao.quantization.quantize import quantize
from torch.ao.quantization.quantize import quantize_dynamic
from torch.ao.quantization.quantize import quantize_qat
from torch.ao.quantization.quantize import register_activation_post_process_hook
from torch.ao.quantization.quantize import swap_module
|
pytorch-master
|
torch/quantization/quantize.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/ns/_numeric_suite.py`, while adding an import statement
here.
"""
from torch.ao.ns._numeric_suite import (
NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST,
_find_match,
compare_weights,
_get_logger_dict_helper,
get_logger_dict,
Logger,
ShadowLogger,
OutputLogger,
_convert_tuple_to_list,
_dequantize_tensor_list,
Shadow,
prepare_model_with_stubs,
_is_identical_module_type,
compare_model_stub,
get_matching_activations,
prepare_model_outputs,
compare_model_outputs,
)
|
pytorch-master
|
torch/quantization/_numeric_suite.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/fake_quantize.py`, while adding an import statement
here.
"""
from torch.ao.quantization.fake_quantize import (
_is_per_channel,
_is_per_tensor,
_is_symmetric_quant,
FakeQuantizeBase,
FakeQuantize,
FixedQParamsFakeQuantize,
FusedMovingAvgObsFakeQuantize,
default_fake_quant,
default_weight_fake_quant,
default_fixed_qparams_range_neg1to1_fake_quant,
default_fixed_qparams_range_0to1_fake_quant,
default_per_channel_weight_fake_quant,
default_histogram_fake_quant,
default_fused_act_fake_quant,
default_fused_wt_fake_quant,
default_fused_per_channel_wt_fake_quant,
_is_fake_quant_script_module,
disable_fake_quant,
enable_fake_quant,
disable_observer,
enable_observer,
)
|
pytorch-master
|
torch/quantization/fake_quantize.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/qconfig.py`, while adding an import statement
here.
"""
from torch.ao.quantization.qconfig import (
QConfig,
default_qconfig,
default_debug_qconfig,
default_per_channel_qconfig,
QConfigDynamic,
default_dynamic_qconfig,
float16_dynamic_qconfig,
float16_static_qconfig,
per_channel_dynamic_qconfig,
float_qparams_weight_only_qconfig,
default_qat_qconfig,
default_weight_only_qconfig,
default_activation_only_qconfig,
default_qat_qconfig_v2,
get_default_qconfig,
get_default_qat_qconfig,
assert_valid_qconfig,
QConfigAny,
add_module_to_qconfig_obs_ctr,
qconfig_equals
)
|
pytorch-master
|
torch/quantization/qconfig.py
|
from .quantize import * # noqa: F403
from .observer import * # noqa: F403
from .qconfig import * # noqa: F403
from .fake_quantize import * # noqa: F403
from .fuse_modules import fuse_modules
from .stubs import * # noqa: F403
from .quant_type import * # noqa: F403
from .quantize_jit import * # noqa: F403
# from .quantize_fx import *
from .quantization_mappings import * # noqa: F403
from .fuser_method_mappings import * # noqa: F403
def default_eval_fn(model, calib_data):
r"""
    Default evaluation function: takes a torch.utils.data.Dataset or a list of
    input Tensors and runs the model on the dataset.
"""
for data, target in calib_data:
model(data)
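# Minimal calibration sketch (hypothetical, not part of the upstream module):
# `default_eval_fn` simply streams (data, target) pairs through the model, which is
# how post-training quantization collects observer statistics during calibration.
def _example_default_eval_fn() -> None:
    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)
    calib_data = [(torch.randn(8, 4), torch.zeros(8)) for _ in range(3)]
    default_eval_fn(model, calib_data)   # runs the model over each calibration batch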
# TODO(future PR): fix the typo, should be `__all__`
_all__ = [
'QuantWrapper', 'QuantStub', 'DeQuantStub',
# Top level API for eager mode quantization
'quantize', 'quantize_dynamic', 'quantize_qat',
'prepare', 'convert', 'prepare_qat',
# Top level API for graph mode quantization on TorchScript
'quantize_jit', 'quantize_dynamic_jit',
# Top level API for graph mode quantization on GraphModule(torch.fx)
# 'fuse_fx', 'quantize_fx', # TODO: add quantize_dynamic_fx
# 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx',
'QuantType', 'quant_type_to_str', # quantization type
# custom module APIs
'get_default_static_quant_module_mappings', 'get_static_quant_module_class',
'get_default_dynamic_quant_module_mappings',
'get_default_qat_module_mappings',
'get_default_qconfig_propagation_list',
'get_default_compare_output_module_list',
'get_quantized_operator',
'get_fuser_method',
# Sub functions for `prepare` and `swap_module`
'propagate_qconfig_', 'add_quant_dequant', 'add_observer_', 'swap_module',
'default_eval_fn', 'get_observer_dict',
'register_activation_post_process_hook',
# Observers
'ObserverBase', 'WeightObserver', 'HistogramObserver',
'observer', 'default_observer',
'default_weight_observer', 'default_placeholder_observer',
'default_per_channel_weight_observer',
# FakeQuantize (for qat)
'default_fake_quant', 'default_weight_fake_quant',
'default_fixed_qparams_range_neg1to1_fake_quant',
'default_fixed_qparams_range_0to1_fake_quant',
'default_per_channel_weight_fake_quant',
'default_histogram_fake_quant',
# QConfig
'QConfig', 'default_qconfig', 'default_dynamic_qconfig', 'float16_dynamic_qconfig',
'float_qparams_weight_only_qconfig',
# QAT utilities
'default_qat_qconfig', 'prepare_qat', 'quantize_qat',
# module transformations
'fuse_modules',
]
|
pytorch-master
|
torch/quantization/__init__.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/stubs.py`, while adding an import statement
here.
"""
from torch.ao.quantization.stubs import (
QuantStub,
DeQuantStub,
QuantWrapper
)
|
pytorch-master
|
torch/quantization/stubs.py
|
# flake8: noqa: F401
r"""
Utils shared by different modes of quantization (eager/graph)
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/utils.py`, while adding an import statement
here.
"""
from torch.ao.quantization.utils import (
activation_dtype,
activation_is_int8_quantized,
activation_is_statically_quantized,
calculate_qmin_qmax,
check_min_max_valid,
get_combined_dict,
get_qconfig_dtypes,
get_qparam_dict,
get_quant_type,
get_swapped_custom_module_class,
getattr_from_fqn,
is_per_channel,
is_per_tensor,
weight_dtype,
weight_is_quantized,
weight_is_statically_quantized,
)
|
pytorch-master
|
torch/quantization/utils.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/fuser_method_mappings.py`, while adding an import statement
here.
"""
from torch.ao.quantization.fuser_method_mappings import (
fuse_conv_bn,
fuse_conv_bn_relu,
fuse_linear_bn,
DEFAULT_OP_LIST_TO_FUSER_METHOD,
get_fuser_method,
)
|
pytorch-master
|
torch/quantization/fuser_method_mappings.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/quantize_jit.py`, while adding an import statement
here.
"""
from torch.ao.quantization.quantize_jit import (
_check_is_script_module,
_check_forward_method,
script_qconfig,
script_qconfig_dict,
fuse_conv_bn_jit,
_prepare_jit,
prepare_jit,
prepare_dynamic_jit,
_convert_jit,
convert_jit,
convert_dynamic_jit,
_quantize_jit,
quantize_jit,
quantize_dynamic_jit
)
|
pytorch-master
|
torch/quantization/quantize_jit.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/quant_type.py`, while adding an import statement
here.
"""
from torch.ao.quantization.quant_type import QuantType
from torch.ao.quantization.quant_type import quant_type_to_str
|
pytorch-master
|
torch/quantization/quant_type.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/quantize_fx.py`, while adding an import statement
here.
"""
from torch.ao.quantization.quantize_fx import (
_check_is_graph_module,
_swap_ff_with_fxff,
_fuse_fx,
Scope,
ScopeContextManager,
QuantizationTracer,
_prepare_fx,
_prepare_standalone_module_fx,
fuse_fx,
prepare_fx,
prepare_qat_fx,
_convert_fx,
convert_fx,
_convert_standalone_module_fx,
)
from torch.ao.quantization.fx.graph_module import (
ObservedGraphModule,
)
|
pytorch-master
|
torch/quantization/quantize_fx.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/ns/_numeric_suite_fx.py`, while adding an import statement
here.
"""
from torch.ao.ns._numeric_suite_fx import (
RNNReturnType,
OutputLogger,
NSTracer,
_extract_weights_one_model,
_extract_weights_impl,
extract_weights,
_add_loggers_one_model,
_add_loggers_impl,
add_loggers,
_extract_logger_info_one_model,
extract_logger_info,
_add_shadow_loggers_impl,
add_shadow_loggers,
extract_shadow_logger_info,
extend_logger_results_with_comparison,
)
|
pytorch-master
|
torch/quantization/_numeric_suite_fx.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.graph_module import (
GraphModule,
FusedGraphModule,
ObservedGraphModule,
is_observed_module,
ObservedStandaloneGraphModule,
is_observed_standalone_module,
QuantizedGraphModule
)
|
pytorch-master
|
torch/quantization/fx/graph_module.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.fusion_patterns import (
FuseHandler,
DefaultFuseHandler,
)
|
pytorch-master
|
torch/quantization/fx/fusion_patterns.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx._equalize import (
reshape_scale,
_InputEqualizationObserver,
_WeightEqualizationObserver,
calculate_equalization_scale,
EqualizationQConfig,
input_equalization_observer,
weight_equalization_observer,
default_equalization_qconfig,
fused_module_supports_equalization,
nn_module_supports_equalization,
custom_module_supports_equalization,
node_supports_equalization,
is_equalization_observer,
get_op_node_and_weight_eq_obs,
maybe_get_weight_eq_obs_node,
maybe_get_next_input_eq_obs,
maybe_get_next_equalization_scale,
scale_input_observer,
scale_weight_node,
scale_weight_functional,
clear_weight_quant_obs_node,
remove_node,
update_obs_for_equalization,
convert_eq_obs,
_convert_equalization_ref,
get_layer_sqnr_dict,
get_equalization_qconfig_dict,
CUSTOM_MODULE_SUPP_LIST,
)
|
pytorch-master
|
torch/quantization/fx/_equalize.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.quantization_types import (
Pattern,
QuantizerCls
)
|
pytorch-master
|
torch/quantization/fx/quantization_types.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.convert import convert
|
pytorch-master
|
torch/quantization/fx/convert.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
# omitting files that are unlikely to be used right now, for example
# the newly added lower_to_fbgemm etc.
from torch.ao.quantization.fx.prepare import prepare
from torch.ao.quantization.fx.convert import convert
from torch.ao.quantization.fx.fuse import fuse
|
pytorch-master
|
torch/quantization/fx/__init__.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.utils import (
graph_pretty_str,
get_per_tensor_qparams,
quantize_node,
get_custom_module_class_keys,
get_linear_prepack_op_for_dtype,
get_qconv_prepack_op,
get_qconv_op,
get_new_attr_name_with_prefix,
graph_module_from_producer_nodes,
assert_and_get_unique_device,
create_getattr_from_value,
create_qparam_nodes,
all_node_args_have_no_tensors,
node_return_type_is_int,
get_non_observable_arg_indexes_and_types,
is_get_tensor_info_node,
maybe_get_next_module
)
|
pytorch-master
|
torch/quantization/fx/utils.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.pattern_utils import (
QuantizeHandler,
register_fusion_pattern,
get_default_fusion_patterns,
register_quant_pattern,
get_default_quant_patterns,
get_default_output_activation_post_process_map
)
# QuantizeHandler.__module__ = _NAMESPACE
register_fusion_pattern.__module__ = "torch.quantization.fx.pattern_utils"
get_default_fusion_patterns.__module__ = "torch.quantization.fx.pattern_utils"
register_quant_pattern.__module__ = "torch.quantization.fx.pattern_utils"
get_default_quant_patterns.__module__ = "torch.quantization.fx.pattern_utils"
get_default_output_activation_post_process_map.__module__ = "torch.quantization.fx.pattern_utils"
# __all__ = [
# "QuantizeHandler",
# "register_fusion_pattern",
# "get_default_fusion_patterns",
# "register_quant_pattern",
# "get_default_quant_patterns",
# "get_default_output_activation_post_process_map",
# ]
|
pytorch-master
|
torch/quantization/fx/pattern_utils.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.fuse import fuse
|
pytorch-master
|
torch/quantization/fx/fuse.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.match_utils import (
MatchResult,
MatchAllNode,
is_match,
find_matches
)
|
pytorch-master
|
torch/quantization/fx/match_utils.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.prepare import (
prepare
)
|
pytorch-master
|
torch/quantization/fx/prepare.py
|
# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.quantization_patterns import (
QuantizeHandler,
BinaryOpQuantizeHandler,
CatQuantizeHandler,
ConvReluQuantizeHandler,
LinearReLUQuantizeHandler,
BatchNormQuantizeHandler,
EmbeddingQuantizeHandler,
RNNDynamicQuantizeHandler,
DefaultNodeQuantizeHandler,
FixedQParamsOpQuantizeHandler,
CopyNodeQuantizeHandler,
CustomModuleQuantizeHandler,
GeneralTensorShapeOpQuantizeHandler,
StandaloneModuleQuantizeHandler
)
QuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
BinaryOpQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
CatQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
ConvReluQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
LinearReLUQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
BatchNormQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
EmbeddingQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
RNNDynamicQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
DefaultNodeQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
FixedQParamsOpQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
CopyNodeQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
CustomModuleQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
GeneralTensorShapeOpQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
StandaloneModuleQuantizeHandler.__module__ = "torch.quantization.fx.quantization_patterns"
|
pytorch-master
|
torch/quantization/fx/quantization_patterns.py
|
import torch
from torch._C import _add_docstr, _special # type: ignore[attr-defined]
from torch._torch_docs import common_args, multi_dim_common
__all__ = [
'airy_ai',
'bessel_j0',
'bessel_j1',
'bessel_y0',
'bessel_y1',
'chebyshev_polynomial_t',
'chebyshev_polynomial_u',
'chebyshev_polynomial_v',
'chebyshev_polynomial_w',
'digamma',
'entr',
'erf',
'erfc',
'erfcx',
'erfinv',
'exp2',
'expit',
'expm1',
'gammainc',
'gammaincc',
'gammaln',
'hermite_polynomial_h',
'hermite_polynomial_he',
'i0',
'i0e',
'i1',
'i1e',
'laguerre_polynomial_l',
'legendre_polynomial_p',
'log1p',
'log_ndtr',
'log_softmax',
'logit',
'logsumexp',
'modified_bessel_i0',
'modified_bessel_i1',
'modified_bessel_k0',
'modified_bessel_k1',
'multigammaln',
'ndtr',
'ndtri',
'polygamma',
'psi',
'round',
'shifted_chebyshev_polynomial_t',
'shifted_chebyshev_polynomial_u',
'shifted_chebyshev_polynomial_v',
'shifted_chebyshev_polynomial_w',
'scaled_modified_bessel_k0',
'scaled_modified_bessel_k1',
'sinc',
'softmax',
'spherical_bessel_j0',
'xlog1py',
'xlogy',
'zeta',
]
Tensor = torch.Tensor
entr = _add_docstr(_special.special_entr,
r"""
entr(input, *, out=None) -> Tensor
Computes the entropy on :attr:`input` (as defined below), elementwise.
.. math::
\begin{align}
\text{entr(x)} = \begin{cases}
-x * \ln(x) & x > 0 \\
0 & x = 0.0 \\
-\infty & x < 0
\end{cases}
\end{align}
""" + """
Args:
input (Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> a = torch.arange(-0.5, 1, 0.5)
>>> a
tensor([-0.5000, 0.0000, 0.5000])
>>> torch.special.entr(a)
tensor([ -inf, 0.0000, 0.3466])
""")
psi = _add_docstr(_special.special_psi,
r"""
psi(input, *, out=None) -> Tensor
Alias for :func:`torch.special.digamma`.
""")
digamma = _add_docstr(_special.special_digamma,
r"""
digamma(input, *, out=None) -> Tensor
Computes the logarithmic derivative of the gamma function on `input`.
.. math::
\digamma(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)}
""" + r"""
Args:
input (Tensor): the tensor to compute the digamma function on
Keyword args:
{out}
.. note:: This function is similar to SciPy's `scipy.special.digamma`.
.. note:: From PyTorch 1.8 onwards, the digamma function returns `-Inf` for `0`.
Previously it returned `NaN` for `0`.
Example::
>>> a = torch.tensor([1, 0.5])
>>> torch.special.digamma(a)
tensor([-0.5772, -1.9635])
""".format(**common_args))
gammaln = _add_docstr(_special.special_gammaln,
r"""
gammaln(input, *, out=None) -> Tensor
Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
.. math::
\text{out}_{i} = \ln \Gamma(|\text{input}_{i}|)
""" + """
Args:
{input}
Keyword args:
{out}
Example::
>>> a = torch.arange(0.5, 2, 0.5)
>>> torch.special.gammaln(a)
tensor([ 0.5724, 0.0000, -0.1208])
""".format(**common_args))
polygamma = _add_docstr(_special.special_polygamma,
r"""
polygamma(n, input, *, out=None) -> Tensor
Computes the :math:`n^{th}` derivative of the digamma function on :attr:`input`.
:math:`n \geq 0` is called the order of the polygamma function.
.. math::
\psi^{(n)}(x) = \frac{d^{(n)}}{dx^{(n)}} \psi(x)
.. note::
This function is implemented only for nonnegative integers :math:`n \geq 0`.
""" + """
Args:
n (int): the order of the polygamma function
{input}
Keyword args:
{out}
Example::
>>> a = torch.tensor([1, 0.5])
>>> torch.special.polygamma(1, a)
tensor([1.64493, 4.9348])
>>> torch.special.polygamma(2, a)
tensor([ -2.4041, -16.8288])
>>> torch.special.polygamma(3, a)
tensor([ 6.4939, 97.4091])
>>> torch.special.polygamma(4, a)
tensor([ -24.8863, -771.4742])
""".format(**common_args))
erf = _add_docstr(_special.special_erf,
r"""
erf(input, *, out=None) -> Tensor
Computes the error function of :attr:`input`. The error function is defined as follows:
.. math::
\mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.erf(torch.tensor([0, -1., 10.]))
tensor([ 0.0000, -0.8427, 1.0000])
""".format(**common_args))
erfc = _add_docstr(_special.special_erfc,
r"""
erfc(input, *, out=None) -> Tensor
Computes the complementary error function of :attr:`input`.
The complementary error function is defined as follows:
.. math::
\mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.erfc(torch.tensor([0, -1., 10.]))
tensor([ 1.0000, 1.8427, 0.0000])
""".format(**common_args))
erfcx = _add_docstr(_special.special_erfcx,
r"""
erfcx(input, *, out=None) -> Tensor
Computes the scaled complementary error function for each element of :attr:`input`.
The scaled complementary error function is defined as follows:
.. math::
\mathrm{erfcx}(x) = e^{x^2} \mathrm{erfc}(x)
""" + r"""
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.erfcx(torch.tensor([0, -1., 10.]))
tensor([ 1.0000, 5.0090, 0.0561])
""".format(**common_args))
erfinv = _add_docstr(_special.special_erfinv,
r"""
erfinv(input, *, out=None) -> Tensor
Computes the inverse error function of :attr:`input`.
The inverse error function is defined in the range :math:`(-1, 1)` as:
.. math::
\mathrm{erfinv}(\mathrm{erf}(x)) = x
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.erfinv(torch.tensor([0, 0.5, -1.]))
tensor([ 0.0000, 0.4769, -inf])
""".format(**common_args))
logit = _add_docstr(_special.special_logit,
r"""
logit(input, eps=None, *, out=None) -> Tensor
Returns a new tensor with the logit of the elements of :attr:`input`.
:attr:`input` is clamped to [eps, 1 - eps] when eps is not None.
When eps is None and :attr:`input` < 0 or :attr:`input` > 1, the function will yield NaN.
.. math::
\begin{align}
y_{i} &= \ln(\frac{z_{i}}{1 - z_{i}}) \\
z_{i} &= \begin{cases}
x_{i} & \text{if eps is None} \\
\text{eps} & \text{if } x_{i} < \text{eps} \\
x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\
1 - \text{eps} & \text{if } x_{i} > 1 - \text{eps}
\end{cases}
\end{align}
""" + r"""
Args:
{input}
eps (float, optional): the epsilon for input clamp bound. Default: ``None``
Keyword args:
{out}
Example::
>>> a = torch.rand(5)
>>> a
tensor([0.2796, 0.9331, 0.6486, 0.1523, 0.6516])
>>> torch.special.logit(a, eps=1e-6)
tensor([-0.9466, 2.6352, 0.6131, -1.7169, 0.6261])
""".format(**common_args))
logsumexp = _add_docstr(_special.special_logsumexp,
r"""
logsumexp(input, dim, keepdim=False, *, out=None)
Alias for :func:`torch.logsumexp`.
""".format(**multi_dim_common))
expit = _add_docstr(_special.special_expit,
r"""
expit(input, *, out=None) -> Tensor
Computes the expit (also known as the logistic sigmoid function) of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> t = torch.randn(4)
>>> t
tensor([ 0.9213, 1.0887, -0.8858, -1.7683])
>>> torch.special.expit(t)
tensor([ 0.7153, 0.7481, 0.2920, 0.1458])
""".format(**common_args))
exp2 = _add_docstr(_special.special_exp2,
r"""
exp2(input, *, out=None) -> Tensor
Computes the base two exponential function of :attr:`input`.
.. math::
y_{i} = 2^{x_{i}}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.exp2(torch.tensor([0, math.log2(2.), 3, 4]))
tensor([ 1., 2., 8., 16.])
""".format(**common_args))
expm1 = _add_docstr(_special.special_expm1,
r"""
expm1(input, *, out=None) -> Tensor
Computes the exponential of the elements of :attr:`input`, minus 1.
.. math::
y_{i} = e^{x_{i}} - 1
.. note:: This function provides greater precision than exp(x) - 1 for small values of x.
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.expm1(torch.tensor([0, math.log(2.)]))
tensor([ 0., 1.])
""".format(**common_args))
xlog1py = _add_docstr(_special.special_xlog1py,
r"""
xlog1py(input, other, *, out=None) -> Tensor
Computes ``input * log1p(other)`` with the following cases.
.. math::
\text{out}_{i} = \begin{cases}
\text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\
0 & \text{if } \text{input}_{i} = 0.0 \text{ and } \text{other}_{i} != \text{NaN} \\
\text{input}_{i} * \text{log1p}(\text{other}_{i})& \text{otherwise}
\end{cases}
Similar to SciPy's `scipy.special.xlog1py`.
""" + r"""
Args:
input (Number or Tensor) : Multiplier
other (Number or Tensor) : Argument
.. note:: At least one of :attr:`input` or :attr:`other` must be a tensor.
Keyword args:
{out}
Example::
>>> x = torch.zeros(5,)
>>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
>>> torch.special.xlog1py(x, y)
tensor([0., 0., 0., 0., nan])
>>> x = torch.tensor([1, 2, 3])
>>> y = torch.tensor([3, 2, 1])
>>> torch.special.xlog1py(x, y)
tensor([1.3863, 2.1972, 2.0794])
>>> torch.special.xlog1py(x, 4)
tensor([1.6094, 3.2189, 4.8283])
>>> torch.special.xlog1py(2, y)
tensor([2.7726, 2.1972, 1.3863])
""".format(**common_args))
xlogy = _add_docstr(_special.special_xlogy,
r"""
xlogy(input, other, *, out=None) -> Tensor
Computes ``input * log(other)`` with the following cases.
.. math::
\text{out}_{i} = \begin{cases}
\text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\
0 & \text{if } \text{input}_{i} = 0.0 \\
\text{input}_{i} * \log{(\text{other}_{i})} & \text{otherwise}
\end{cases}
Similar to SciPy's `scipy.special.xlogy`.
""" + r"""
Args:
input (Number or Tensor) : Multiplier
other (Number or Tensor) : Argument
.. note:: At least one of :attr:`input` or :attr:`other` must be a tensor.
Keyword args:
{out}
Example::
>>> x = torch.zeros(5,)
>>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
>>> torch.special.xlogy(x, y)
tensor([0., 0., 0., 0., nan])
>>> x = torch.tensor([1, 2, 3])
>>> y = torch.tensor([3, 2, 1])
>>> torch.special.xlogy(x, y)
tensor([1.0986, 1.3863, 0.0000])
>>> torch.special.xlogy(x, 4)
tensor([1.3863, 2.7726, 4.1589])
>>> torch.special.xlogy(2, y)
tensor([2.1972, 1.3863, 0.0000])
""".format(**common_args))
i0 = _add_docstr(_special.special_i0,
r"""
i0(input, *, out=None) -> Tensor
Computes the zeroth order modified Bessel function of the first kind for each element of :attr:`input`.
.. math::
\text{out}_{i} = I_0(\text{input}_{i}) = \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2}
""" + r"""
Args:
input (Tensor): the input tensor
Keyword args:
{out}
Example::
>>> torch.i0(torch.arange(5, dtype=torch.float32))
tensor([ 1.0000, 1.2661, 2.2796, 4.8808, 11.3019])
""".format(**common_args))
i0e = _add_docstr(_special.special_i0e,
r"""
i0e(input, *, out=None) -> Tensor
Computes the exponentially scaled zeroth order modified Bessel function of the first kind (as defined below)
for each element of :attr:`input`.
.. math::
\text{out}_{i} = \exp(-|x|) * i0(x) = \exp(-|x|) * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.i0e(torch.arange(5, dtype=torch.float32))
tensor([1.0000, 0.4658, 0.3085, 0.2430, 0.2070])
""".format(**common_args))
i1 = _add_docstr(_special.special_i1,
r"""
i1(input, *, out=None) -> Tensor
Computes the first order modified Bessel function of the first kind (as defined below)
for each element of :attr:`input`.
.. math::
\text{out}_{i} = \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.i1(torch.arange(5, dtype=torch.float32))
tensor([0.0000, 0.5652, 1.5906, 3.9534, 9.7595])
""".format(**common_args))
i1e = _add_docstr(_special.special_i1e,
r"""
i1e(input, *, out=None) -> Tensor
Computes the exponentially scaled first order modified Bessel function of the first kind (as defined below)
for each element of :attr:`input`.
.. math::
\text{out}_{i} = \exp(-|x|) * i1(x) =
\exp(-|x|) * \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.i1e(torch.arange(5, dtype=torch.float32))
tensor([0.0000, 0.2079, 0.2153, 0.1968, 0.1788])
""".format(**common_args))
ndtr = _add_docstr(_special.special_ndtr,
r"""
ndtr(input, *, out=None) -> Tensor
Computes the area under the standard Gaussian probability density function,
integrated from minus infinity to :attr:`input`, elementwise.
.. math::
\text{ndtr}(x) = \frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3]))
tensor([0.0013, 0.0228, 0.1587, 0.5000, 0.8413, 0.9772, 0.9987])
""".format(**common_args))
ndtri = _add_docstr(_special.special_ndtri,
r"""
ndtri(input, *, out=None) -> Tensor
Computes the argument, x, for which the area under the Gaussian probability density function
(integrated from minus infinity to x) is equal to :attr:`input`, elementwise.
.. math::
\text{ndtri}(p) = \sqrt{2}\text{erf}^{-1}(2p - 1)
.. note::
Also known as the quantile function of the normal distribution.
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.ndtri(torch.tensor([0, 0.25, 0.5, 0.75, 1]))
tensor([ -inf, -0.6745, 0.0000, 0.6745, inf])
""".format(**common_args))
log_ndtr = _add_docstr(_special.special_log_ndtr,
r"""
log_ndtr(input, *, out=None) -> Tensor
Computes the log of the area under the standard Gaussian probability density function,
integrated from minus infinity to :attr:`input`, elementwise.
.. math::
\text{log\_ndtr}(x) = \log\left(\frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt \right)
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> torch.special.log_ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3]))
tensor([-6.6077, -3.7832, -1.8410, -0.6931, -0.1728, -0.0230, -0.0014])
""".format(**common_args))
log1p = _add_docstr(_special.special_log1p,
r"""
log1p(input, *, out=None) -> Tensor
Alias for :func:`torch.log1p`.
""")
sinc = _add_docstr(_special.special_sinc,
r"""
sinc(input, *, out=None) -> Tensor
Computes the normalized sinc of :attr:`input`.
.. math::
\text{out}_{i} =
\begin{cases}
1, & \text{if}\ \text{input}_{i}=0 \\
\sin(\pi \text{input}_{i}) / (\pi \text{input}_{i}), & \text{otherwise}
\end{cases}
""" + r"""
Args:
{input}
Keyword args:
{out}
Example::
>>> t = torch.randn(4)
>>> t
tensor([ 0.2252, -0.2948, 1.0267, -1.1566])
>>> torch.special.sinc(t)
tensor([ 0.9186, 0.8631, -0.0259, -0.1300])
""".format(**common_args))
round = _add_docstr(_special.special_round,
r"""
round(input, *, out=None) -> Tensor
Alias for :func:`torch.round`.
""")
softmax = _add_docstr(_special.special_softmax,
r"""
softmax(input, dim, *, dtype=None) -> Tensor
Computes the softmax function.
Softmax is defined as:
:math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}`
It is applied to all slices along dim, and will re-scale them so that the elements
lie in the range `[0, 1]` and sum to 1.
Args:
input (Tensor): the input tensor
dim (int): A dimension along which softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Examples::
>>> t = torch.ones(2, 2)
>>> torch.special.softmax(t, 0)
tensor([[0.5000, 0.5000],
[0.5000, 0.5000]])
""")
log_softmax = _add_docstr(_special.special_log_softmax,
r"""
log_softmax(input, dim, *, dtype=None) -> Tensor
Computes softmax followed by a logarithm.
While mathematically equivalent to log(softmax(x)), doing these two
operations separately is slower and numerically unstable. This function
is computed as:
.. math::
\text{log\_softmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
""" + r"""
Args:
input (Tensor): the input tensor
dim (int): A dimension along which log_softmax will be computed.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is cast to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
Example::
>>> t = torch.ones(2, 2)
>>> torch.special.log_softmax(t, 0)
tensor([[-0.6931, -0.6931],
[-0.6931, -0.6931]])
""")
zeta = _add_docstr(_special.special_zeta,
r"""
zeta(input, other, *, out=None) -> Tensor
Computes the Hurwitz zeta function, elementwise.
.. math::
\zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x}
""" + r"""
Args:
input (Tensor): the input tensor corresponding to `x`.
other (Tensor): the input tensor corresponding to `q`.
.. note::
The Riemann zeta function corresponds to the case when `q = 1`.
Keyword args:
{out}
Example::
>>> x = torch.tensor([2., 4.])
>>> torch.special.zeta(x, 1)
tensor([1.6449, 1.0823])
>>> torch.special.zeta(x, torch.tensor([1., 2.]))
tensor([1.6449, 0.0823])
>>> torch.special.zeta(2, torch.tensor([1., 2.]))
tensor([1.6449, 0.6449])
""".format(**common_args))
multigammaln = _add_docstr(_special.special_multigammaln,
r"""
multigammaln(input, p, *, out=None) -> Tensor
Computes the `multivariate log-gamma function
<https://en.wikipedia.org/wiki/Multivariate_gamma_function>`_ with dimension
:math:`p` element-wise, given by
.. math::
\log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right)
where :math:`C = \log(\pi) \times \frac{p (p - 1)}{4}` and :math:`\Gamma(\cdot)` is the Gamma function.
All elements must be greater than :math:`\frac{p - 1}{2}`, otherwise an error would be thrown.
""" + """
Args:
input (Tensor): the tensor to compute the multivariate log-gamma function
p (int): the number of dimensions
Keyword args:
{out}
Example::
>>> a = torch.empty(2, 3).uniform_(1, 2)
>>> a
tensor([[1.6835, 1.8474, 1.1929],
[1.0475, 1.7162, 1.4180]])
>>> torch.special.multigammaln(a, 2)
tensor([[0.3928, 0.4007, 0.7586],
[1.0311, 0.3901, 0.5049]])
""".format(**common_args))
gammainc = _add_docstr(_special.special_gammainc,
r"""
gammainc(input, other, *, out=None) -> Tensor
Computes the regularized lower incomplete gamma function:
.. math::
\text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_0^{\text{other}_i} t^{\text{input}_i-1} e^{-t} dt
where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive
and at least one is strictly positive.
If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`.
:math:`\Gamma(\cdot)` in the equation above is the gamma function,
.. math::
\Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt.
See :func:`torch.special.gammaincc` and :func:`torch.special.gammaln` for related functions.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`
and float inputs.
.. note::
The backward pass with respect to :attr:`input` is not yet supported.
Please open an issue on PyTorch's Github to request it.
""" + r"""
Args:
input (Tensor): the first non-negative input tensor
other (Tensor): the second non-negative input tensor
Keyword args:
{out}
Example::
>>> a1 = torch.tensor([4.0])
>>> a2 = torch.tensor([3.0, 4.0, 5.0])
>>> a = torch.special.gammainc(a1, a2)
tensor([0.3528, 0.5665, 0.7350])
>>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2)
tensor([1., 1., 1.])
""".format(**common_args))
gammaincc = _add_docstr(_special.special_gammaincc,
r"""
gammaincc(input, other, *, out=None) -> Tensor
Computes the regularized upper incomplete gamma function:
.. math::
\text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_{\text{other}_i}^{\infty} t^{\text{input}_i-1} e^{-t} dt
where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive
and at least one is strictly positive.
If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`.
:math:`\Gamma(\cdot)` in the equation above is the gamma function,
.. math::
\Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt.
See :func:`torch.special.gammainc` and :func:`torch.special.gammaln` for related functions.
Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`
and float inputs.
.. note::
The backward pass with respect to :attr:`input` is not yet supported.
Please open an issue on PyTorch's Github to request it.
""" + r"""
Args:
input (Tensor): the first non-negative input tensor
other (Tensor): the second non-negative input tensor
Keyword args:
{out}
Example::
>>> a1 = torch.tensor([4.0])
>>> a2 = torch.tensor([3.0, 4.0, 5.0])
>>> a = torch.special.gammaincc(a1, a2)
tensor([0.6472, 0.4335, 0.2650])
>>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2)
tensor([1., 1., 1.])
""".format(**common_args))
airy_ai = _add_docstr(_special.special_airy_ai,
r"""
airy_ai(input, *, out=None) -> Tensor
Airy function :math:`\text{Ai}\left(\text{input}\right)`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
bessel_j0 = _add_docstr(_special.special_bessel_j0,
r"""
bessel_j0(input, *, out=None) -> Tensor
Bessel function of the first kind of order :math:`0`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
bessel_j1 = _add_docstr(_special.special_bessel_j1,
r"""
bessel_j1(input, *, out=None) -> Tensor
Bessel function of the first kind of order :math:`1`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
bessel_y0 = _add_docstr(_special.special_bessel_y0,
r"""
bessel_y0(input, *, out=None) -> Tensor
Bessel function of the second kind of order :math:`0`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
bessel_y1 = _add_docstr(_special.special_bessel_y1,
r"""
bessel_y1(input, *, out=None) -> Tensor
Bessel function of the second kind of order :math:`1`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
chebyshev_polynomial_t = _add_docstr(_special.special_chebyshev_polynomial_t,
r"""
chebyshev_polynomial_t(input, n, *, out=None) -> Tensor
Chebyshev polynomial of the first kind :math:`T_{n}(\text{input})`.
If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
is returned. If :math:`n < 6` or :math:`|\text{input}| > 1`, the recursion:
.. math::
T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input})
is evaluated. Otherwise, the explicit trigonometric formula:
.. math::
T_{n}(\text{input}) = \text{cos}(n \times \text{arccos}(\text{input}))
is evaluated.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
chebyshev_polynomial_u = _add_docstr(_special.special_chebyshev_polynomial_u,
r"""
chebyshev_polynomial_u(input, n, *, out=None) -> Tensor
Chebyshev polynomial of the second kind :math:`U_{n}(\text{input})`.
If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`,
:math:`2 \times \text{input}` is returned. If :math:`n < 6` or
:math:`|\text{input}| > 1`, the recursion:
.. math::
U_{n + 1}(\text{input}) = 2 \times \text{input} \times U_{n}(\text{input}) - U_{n - 1}(\text{input})
is evaluated. Otherwise, the explicit trigonometric formula:
.. math::
\frac{\text{sin}((n + 1) \times \text{arccos}(\text{input}))}{\text{sin}(\text{arccos}(\text{input}))}
is evaluated.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
chebyshev_polynomial_v = _add_docstr(_special.special_chebyshev_polynomial_v,
r"""
chebyshev_polynomial_v(input, n, *, out=None) -> Tensor
Chebyshev polynomial of the third kind :math:`V_{n}(\text{input})`.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
chebyshev_polynomial_w = _add_docstr(_special.special_chebyshev_polynomial_w,
r"""
chebyshev_polynomial_w(input, n, *, out=None) -> Tensor
Chebyshev polynomial of the fourth kind :math:`W_{n}(\text{input})`.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
hermite_polynomial_h = _add_docstr(_special.special_hermite_polynomial_h,
r"""
hermite_polynomial_h(input, n, *, out=None) -> Tensor
Physicist’s Hermite polynomial :math:`H_{n}(\text{input})`.
If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`2 \times \text{input}`
is returned. Otherwise, the recursion:
.. math::
H_{n + 1}(\text{input}) = 2 \times \text{input} \times H_{n}(\text{input}) - 2 \times n \times H_{n - 1}(\text{input})
is evaluated.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
hermite_polynomial_he = _add_docstr(_special.special_hermite_polynomial_he,
r"""
hermite_polynomial_he(input, n, *, out=None) -> Tensor
Probabilist’s Hermite polynomial :math:`He_{n}(\text{input})`.
If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
is returned. Otherwise, the recursion:
.. math::
He_{n + 1}(\text{input}) = \text{input} \times He_{n}(\text{input}) - n \times He_{n - 1}(\text{input})
is evaluated.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
laguerre_polynomial_l = _add_docstr(_special.special_laguerre_polynomial_l,
r"""
laguerre_polynomial_l(input, n, *, out=None) -> Tensor
Laguerre polynomial :math:`L_{n}(\text{input})`.
If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`1 - \text{input}`
is returned. Otherwise, the recursion:
.. math::
(n + 1) \times L_{n + 1}(\text{input}) = (2 \times n + 1 - \text{input}) \times L_{n}(\text{input}) - n \times L_{n - 1}(\text{input})
is evaluated.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
legendre_polynomial_p = _add_docstr(_special.special_legendre_polynomial_p,
r"""
legendre_polynomial_p(input, n, *, out=None) -> Tensor
Legendre polynomial :math:`P_{n}(\text{input})`.
If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
is returned. Otherwise, the recursion:
.. math::
(n + 1) \times P_{n + 1}(\text{input}) = (2 \times n + 1) \times \text{input} \times P_{n}(\text{input}) - n \times P_{n - 1}(\text{input})
is evaluated.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
modified_bessel_i0 = _add_docstr(_special.special_modified_bessel_i0,
r"""
modified_bessel_i0(input, *, out=None) -> Tensor
Modified Bessel function of the first kind of order :math:`0`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
modified_bessel_i1 = _add_docstr(_special.special_modified_bessel_i1,
r"""
modified_bessel_i1(input, *, out=None) -> Tensor
Modified Bessel function of the first kind of order :math:`1`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
modified_bessel_k0 = _add_docstr(_special.special_modified_bessel_k0,
r"""
modified_bessel_k0(input, *, out=None) -> Tensor
Modified Bessel function of the second kind of order :math:`0`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
modified_bessel_k1 = _add_docstr(_special.special_modified_bessel_k1,
r"""
modified_bessel_k1(input, *, out=None) -> Tensor
Modified Bessel function of the second kind of order :math:`1`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
scaled_modified_bessel_k0 = _add_docstr(_special.special_scaled_modified_bessel_k0,
r"""
scaled_modified_bessel_k0(input, *, out=None) -> Tensor
Scaled modified Bessel function of the second kind of order :math:`0`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
scaled_modified_bessel_k1 = _add_docstr(_special.special_scaled_modified_bessel_k1,
r"""
scaled_modified_bessel_k1(input, *, out=None) -> Tensor
Scaled modified Bessel function of the second kind of order :math:`1`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
shifted_chebyshev_polynomial_t = _add_docstr(_special.special_shifted_chebyshev_polynomial_t,
r"""
shifted_chebyshev_polynomial_t(input, n, *, out=None) -> Tensor
Shifted Chebyshev polynomial of the first kind :math:`T_{n}^{\ast}(\text{input})`.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
shifted_chebyshev_polynomial_u = _add_docstr(_special.special_shifted_chebyshev_polynomial_u,
r"""
shifted_chebyshev_polynomial_u(input, n, *, out=None) -> Tensor
Shifted Chebyshev polynomial of the second kind :math:`U_{n}^{\ast}(\text{input})`.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
shifted_chebyshev_polynomial_v = _add_docstr(_special.special_shifted_chebyshev_polynomial_v,
r"""
shifted_chebyshev_polynomial_v(input, n, *, out=None) -> Tensor
Shifted Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
shifted_chebyshev_polynomial_w = _add_docstr(_special.special_shifted_chebyshev_polynomial_w,
r"""
shifted_chebyshev_polynomial_w(input, n, *, out=None) -> Tensor
Shifted Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`.
""" + r"""
Args:
{input}
n (Tensor): Degree of the polynomial.
Keyword args:
{out}
""".format(**common_args))
spherical_bessel_j0 = _add_docstr(_special.special_spherical_bessel_j0,
r"""
spherical_bessel_j0(input, *, out=None) -> Tensor
Spherical Bessel function of the first kind of order :math:`0`.
""" + r"""
Args:
{input}
Keyword args:
{out}
""".format(**common_args))
|
pytorch-master
|
torch/special/__init__.py
|
"""
This module contains tensor creation utilities.
"""
import torch
from typing import Optional, List, Tuple, Union, cast
import math
import collections.abc
# Used by make_tensor for generating complex tensor.
complex_to_corresponding_float_type_map = {torch.complex32: torch.float16,
torch.complex64: torch.float32,
torch.complex128: torch.float64}
float_to_corresponding_complex_type_map = {v: k for k, v in complex_to_corresponding_float_type_map.items()}
def make_tensor(
*shape: Union[int, torch.Size, List[int], Tuple[int, ...]],
dtype: torch.dtype,
device: Union[str, torch.device],
low: Optional[float] = None,
high: Optional[float] = None,
requires_grad: bool = False,
noncontiguous: bool = False,
exclude_zero: bool = False
) -> torch.Tensor:
r"""Creates a tensor with the given :attr:`shape`, :attr:`device`, and :attr:`dtype`, and filled with
values uniformly drawn from ``[low, high)``.
If :attr:`low` or :attr:`high` are specified and are outside the range of the :attr:`dtype`'s representable
finite values then they are clamped to the lowest or highest representable finite value, respectively.
If ``None``, then the following table describes the default values for :attr:`low` and :attr:`high`,
which depend on :attr:`dtype`.
+---------------------------+------------+----------+
| ``dtype`` | ``low`` | ``high`` |
+===========================+============+==========+
| boolean type | ``0`` | ``2`` |
+---------------------------+------------+----------+
| unsigned integral type | ``0`` | ``10`` |
+---------------------------+------------+----------+
| signed integral types | ``-9`` | ``10`` |
+---------------------------+------------+----------+
| floating types | ``-9`` | ``9`` |
+---------------------------+------------+----------+
| complex types | ``-9`` | ``9`` |
+---------------------------+------------+----------+
Args:
shape (Tuple[int, ...]): Single integer or a sequence of integers defining the shape of the output tensor.
dtype (:class:`torch.dtype`): The data type of the returned tensor.
device (Union[str, torch.device]): The device of the returned tensor.
low (Optional[Number]): Sets the lower limit (inclusive) of the given range. If a number is provided it is
clamped to the least representable finite value of the given dtype. When ``None`` (default),
this value is determined based on the :attr:`dtype` (see the table above). Default: ``None``.
high (Optional[Number]): Sets the upper limit (exclusive) of the given range. If a number is provided it is
clamped to the greatest representable finite value of the given dtype. When ``None`` (default) this value
is determined based on the :attr:`dtype` (see the table above). Default: ``None``.
requires_grad (Optional[bool]): If autograd should record operations on the returned tensor. Default: ``False``.
noncontiguous (Optional[bool]): If `True`, the returned tensor will be noncontiguous. This argument is
ignored if the constructed tensor has fewer than two elements.
exclude_zero (Optional[bool]): If ``True`` then zeros are replaced with the dtype's small positive value
depending on the :attr:`dtype`. For bool and integer types zero is replaced with one. For floating
point types it is replaced with the dtype's smallest positive normal number (the "tiny" value of the
:attr:`dtype`'s :func:`~torch.finfo` object), and for complex types it is replaced with a complex number
whose real and imaginary parts are both the smallest positive normal number representable by the complex
type. Default ``False``.
Raises:
ValueError: if ``requires_grad=True`` is passed for integral `dtype`
ValueError: If ``low > high``.
ValueError: If either :attr:`low` or :attr:`high` is ``nan``.
TypeError: If :attr:`dtype` isn't supported by this function.
Examples:
>>> from torch.testing import make_tensor
>>> # Creates a float tensor with values in [-1, 1)
>>> make_tensor((3,), device='cpu', dtype=torch.float32, low=-1, high=1)
>>> # xdoctest: +SKIP
tensor([ 0.1205, 0.2282, -0.6380])
>>> # Creates a bool tensor on CUDA
>>> make_tensor((2, 2), device='cuda', dtype=torch.bool)
tensor([[False, False],
[False, True]], device='cuda:0')
"""
def _modify_low_high(low, high, lowest, highest, default_low, default_high, dtype):
"""
Modifies (and raises ValueError when appropriate) the ``low`` and ``high`` values given by the user, if required.
"""
def clamp(a, l, h):
return min(max(a, l), h)
low = low if low is not None else default_low
high = high if high is not None else default_high
# Checks for error cases
if low != low or high != high:
raise ValueError("make_tensor: one of low or high was NaN!")
if low > high:
raise ValueError("make_tensor: low must be weakly less than high!")
low = clamp(low, lowest, highest)
high = clamp(high, lowest, highest)
if dtype in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
return math.floor(low), math.ceil(high)
return low, high
if len(shape) == 1 and isinstance(shape[0], collections.abc.Sequence):
shape = shape[0] # type: ignore[assignment]
shape = cast(Tuple[int, ...], tuple(shape))
_integral_types = [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
_floating_types = [torch.float16, torch.bfloat16, torch.float32, torch.float64]
_complex_types = [torch.complex32, torch.complex64, torch.complex128]
if requires_grad and dtype not in _floating_types and dtype not in _complex_types:
raise ValueError("make_tensor: requires_grad must be False for integral dtype")
if dtype is torch.bool:
result = torch.randint(0, 2, shape, device=device, dtype=dtype) # type: ignore[call-overload]
elif dtype is torch.uint8:
ranges = (torch.iinfo(dtype).min, torch.iinfo(dtype).max)
low, high = cast(Tuple[int, int], _modify_low_high(low, high, ranges[0], ranges[1], 0, 10, dtype))
result = torch.randint(low, high, shape, device=device, dtype=dtype) # type: ignore[call-overload]
elif dtype in _integral_types:
ranges = (torch.iinfo(dtype).min, torch.iinfo(dtype).max)
low, high = _modify_low_high(low, high, ranges[0], ranges[1], -9, 10, dtype)
result = torch.randint(low, high, shape, device=device, dtype=dtype) # type: ignore[call-overload]
elif dtype in _floating_types:
ranges_floats = (torch.finfo(dtype).min, torch.finfo(dtype).max)
low, high = _modify_low_high(low, high, ranges_floats[0], ranges_floats[1], -9, 9, dtype)
rand_val = torch.rand(shape, device=device, dtype=dtype)
result = high * rand_val + low * (1 - rand_val)
elif dtype in _complex_types:
float_dtype = complex_to_corresponding_float_type_map[dtype]
ranges_floats = (torch.finfo(float_dtype).min, torch.finfo(float_dtype).max)
low, high = _modify_low_high(low, high, ranges_floats[0], ranges_floats[1], -9, 9, dtype)
real_rand_val = torch.rand(shape, device=device, dtype=float_dtype)
imag_rand_val = torch.rand(shape, device=device, dtype=float_dtype)
real = high * real_rand_val + low * (1 - real_rand_val)
imag = high * imag_rand_val + low * (1 - imag_rand_val)
result = torch.complex(real, imag)
else:
raise TypeError(f"The requested dtype '{dtype}' is not supported by torch.testing.make_tensor()."
" To request support, file an issue at: https://github.com/pytorch/pytorch/issues")
if noncontiguous and result.numel() > 1:
result = torch.repeat_interleave(result, 2, dim=-1)
result = result[..., ::2]
if exclude_zero:
if dtype in _integral_types or dtype is torch.bool:
replace_with = torch.tensor(1, device=device, dtype=dtype)
elif dtype in _floating_types:
replace_with = torch.tensor(torch.finfo(dtype).tiny, device=device, dtype=dtype)
else: # dtype in _complex_types:
float_dtype = complex_to_corresponding_float_type_map[dtype]
float_eps = torch.tensor(torch.finfo(float_dtype).tiny, device=device, dtype=float_dtype)
replace_with = torch.complex(float_eps, float_eps)
result[result == 0] = replace_with
if dtype in _floating_types + _complex_types:
result.requires_grad = requires_grad
return result
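# Editor's note: a hedged usage sketch (not part of the original file) exercising a few of
# the options documented above; the docstring remains the authoritative reference.
def _make_tensor_usage_sketch():
    # ``noncontiguous=True`` yields a non-contiguous result for tensors with more than one
    # element, and ``exclude_zero=True`` replaces exact zeros with the dtype's tiny value.
    t = make_tensor((4, 4), dtype=torch.float32, device="cpu",
                    low=-1.0, high=1.0, noncontiguous=True, exclude_zero=True)
    return t.is_contiguous(), bool((t == 0).any())  # expected: (False, False)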
|
pytorch-master
|
torch/testing/_creation.py
|
"""This module exists since the `torch.testing` exposed a lot of stuff that shouldn't have been public. Although this
was never documented anywhere, some other internal FB projects as well as downstream OSS projects might use this. Thus,
we don't internalize without warning, but still go through a deprecation cycle.
"""
import functools
import random
import warnings
from typing import Any, Callable, Dict, Optional, Tuple, Union
import torch
from . import _legacy
__all__ = [
"rand",
"randn",
"assert_allclose",
"get_all_device_types",
"make_non_contiguous",
]
def warn_deprecated(instructions: Union[str, Callable[[str, Tuple[Any, ...], Dict[str, Any], Any], str]]) -> Callable:
def outer_wrapper(fn: Callable) -> Callable:
name = fn.__name__
head = f"torch.testing.{name}() is deprecated since 1.12 and will be removed in 1.14. "
@functools.wraps(fn)
def inner_wrapper(*args: Any, **kwargs: Any) -> Any:
return_value = fn(*args, **kwargs)
tail = instructions(name, args, kwargs, return_value) if callable(instructions) else instructions
msg = (head + tail).strip()
warnings.warn(msg, FutureWarning)
return return_value
return inner_wrapper
return outer_wrapper
rand = warn_deprecated("Use torch.rand() instead.")(torch.rand)
randn = warn_deprecated("Use torch.randn() instead.")(torch.randn)
_DTYPE_PRECISIONS = {
torch.float16: (1e-3, 1e-3),
torch.float32: (1e-4, 1e-5),
torch.float64: (1e-5, 1e-8),
}
def _get_default_rtol_and_atol(actual: torch.Tensor, expected: torch.Tensor) -> Tuple[float, float]:
actual_rtol, actual_atol = _DTYPE_PRECISIONS.get(actual.dtype, (0.0, 0.0))
expected_rtol, expected_atol = _DTYPE_PRECISIONS.get(expected.dtype, (0.0, 0.0))
return max(actual_rtol, expected_rtol), max(actual_atol, expected_atol)
@warn_deprecated(
"Use torch.testing.assert_close() instead. "
"For detailed upgrade instructions see https://github.com/pytorch/pytorch/issues/61844."
)
def assert_allclose(
actual: Any,
expected: Any,
rtol: Optional[float] = None,
atol: Optional[float] = None,
equal_nan: bool = True,
msg: str = "",
) -> None:
if not isinstance(actual, torch.Tensor):
actual = torch.tensor(actual)
if not isinstance(expected, torch.Tensor):
expected = torch.tensor(expected, dtype=actual.dtype)
if rtol is None and atol is None:
rtol, atol = _get_default_rtol_and_atol(actual, expected)
torch.testing.assert_close(
actual,
expected,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
check_device=True,
check_dtype=False,
check_stride=False,
msg=msg or None,
)
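# Editor's note: a hedged migration sketch (not part of the original file) for the upgrade
# path named in the deprecation message above: the legacy per-dtype defaults from
# ``_DTYPE_PRECISIONS`` can be passed explicitly to ``torch.testing.assert_close``.
def _assert_allclose_migration_sketch(actual: torch.Tensor, expected: torch.Tensor) -> None:
    rtol, atol = _get_default_rtol_and_atol(actual, expected)
    torch.testing.assert_close(actual, expected, rtol=rtol, atol=atol,
                               equal_nan=True, check_dtype=False, check_stride=False)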
getter_instructions = (
lambda name, args, kwargs, return_value: f"This call can be replaced with {return_value}." # noqa: E731
)
# Deprecate and expose all dtype getters
for name in _legacy.__all_dtype_getters__:
fn = getattr(_legacy, name)
globals()[name] = warn_deprecated(getter_instructions)(fn)
__all__.append(name)
get_all_device_types = warn_deprecated(getter_instructions)(_legacy.get_all_device_types)
@warn_deprecated(
"Depending on the use case there a different replacement options:\n\n"
"- If you are using `make_non_contiguous` in combination with a creation function to create a noncontiguous tensor "
"with random values, use `torch.testing.make_tensor(..., noncontiguous=True)` instead.\n"
"- If you are using `make_non_contiguous` with a specific tensor, you can replace this call with "
"`torch.repeat_interleave(input, 2, dim=-1)[..., ::2]`.\n"
"- If you are using `make_non_contiguous` in the PyTorch test suite, use "
"`torch.testing._internal.common_utils.noncontiguous_like` instead."
)
def make_non_contiguous(tensor: torch.Tensor) -> torch.Tensor:
if tensor.numel() <= 1: # can't make non-contiguous
return tensor.clone()
osize = list(tensor.size())
# randomly inflate a few dimensions in osize
for _ in range(2):
dim = random.randint(0, len(osize) - 1)
add = random.randint(4, 15)
osize[dim] = osize[dim] + add
# narrow doesn't make a non-contiguous tensor if we only narrow the 0-th dimension,
# (which will always happen with a 1-dimensional tensor), so let's make a new
# right-most dimension and cut it off
input = tensor.new(torch.Size(osize + [random.randint(2, 3)]))
input = input.select(len(input.size()) - 1, random.randint(0, 1))
# now extract the input of correct size from 'input'
for i in range(len(osize)):
if input.size(i) != tensor.size(i):
bounds = random.randint(1, input.size(i) - tensor.size(i))
input = input.narrow(i, bounds, tensor.size(i))
input.copy_(tensor)
# Use .data here to hide the view relation between input and other temporary Tensors
return input.data
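# Editor's note: a hedged sketch (not part of the original file) checking the drop-in
# replacement quoted in the deprecation message above against the legacy helper.
def _make_non_contiguous_replacement_sketch() -> bool:
    tensor = torch.arange(12.0).reshape(3, 4)
    legacy = make_non_contiguous(tensor)  # emits the FutureWarning defined above
    replacement = torch.repeat_interleave(tensor, 2, dim=-1)[..., ::2]
    # Both paths preserve the values; the replacement is guaranteed non-contiguous.
    return (
        bool(torch.equal(legacy, tensor))
        and bool(torch.equal(replacement, tensor))
        and not replacement.is_contiguous()
    )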
|
pytorch-master
|
torch/testing/_deprecated.py
|
from ._comparison import assert_close
from torch._C import FileCheck
from ._creation import make_tensor
from ._deprecated import * # noqa: F403
|
pytorch-master
|
torch/testing/__init__.py
|
"""This module exist to be able to deprecate functions publicly without doing so internally. The deprecated
public versions are defined in torch.testing._deprecated and exposed from torch.testing. The non-deprecated internal
versions should be imported from torch.testing._internal
"""
from typing import List
import torch
__all_dtype_getters__ = [
"_validate_dtypes",
"_dispatch_dtypes",
"all_types",
"all_types_and",
"all_types_and_complex",
"all_types_and_complex_and",
"all_types_and_half",
"complex_types",
"empty_types",
"floating_and_complex_types",
"floating_and_complex_types_and",
"floating_types",
"floating_types_and",
"double_types",
"floating_types_and_half",
"get_all_complex_dtypes",
"get_all_dtypes",
"get_all_fp_dtypes",
"get_all_int_dtypes",
"get_all_math_dtypes",
"integral_types",
"integral_types_and",
]
__all__ = [
*__all_dtype_getters__,
"get_all_device_types",
]
# Functions and classes for describing the dtypes a function supports
# NOTE: these helpers should correspond to PyTorch's C++ dispatch macros
# Verifies each given dtype is a torch.dtype
def _validate_dtypes(*dtypes):
for dtype in dtypes:
assert isinstance(dtype, torch.dtype)
return dtypes
# class for tuples corresponding to a PyTorch dispatch macro
class _dispatch_dtypes(tuple):
def __add__(self, other):
assert isinstance(other, tuple)
return _dispatch_dtypes(tuple.__add__(self, other))
_empty_types = _dispatch_dtypes(())
def empty_types():
return _empty_types
_floating_types = _dispatch_dtypes((torch.float32, torch.float64))
def floating_types():
return _floating_types
_floating_types_and_half = _floating_types + (torch.half,)
def floating_types_and_half():
return _floating_types_and_half
def floating_types_and(*dtypes):
return _floating_types + _validate_dtypes(*dtypes)
_floating_and_complex_types = _floating_types + (torch.cfloat, torch.cdouble)
def floating_and_complex_types():
return _floating_and_complex_types
def floating_and_complex_types_and(*dtypes):
return _floating_and_complex_types + _validate_dtypes(*dtypes)
_double_types = _dispatch_dtypes((torch.float64, torch.complex128))
def double_types():
return _double_types
_integral_types = _dispatch_dtypes((torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64))
def integral_types():
return _integral_types
def integral_types_and(*dtypes):
return _integral_types + _validate_dtypes(*dtypes)
_all_types = _floating_types + _integral_types
def all_types():
return _all_types
def all_types_and(*dtypes):
return _all_types + _validate_dtypes(*dtypes)
_complex_types = _dispatch_dtypes((torch.cfloat, torch.cdouble))
def complex_types():
return _complex_types
def complex_types_and(*dtypes):
return _complex_types + _validate_dtypes(*dtypes)
_all_types_and_complex = _all_types + _complex_types
def all_types_and_complex():
return _all_types_and_complex
def all_types_and_complex_and(*dtypes):
return _all_types_and_complex + _validate_dtypes(*dtypes)
_all_types_and_half = _all_types + (torch.half,)
def all_types_and_half():
return _all_types_and_half
# The functions below are used for convenience in our test suite and thus have no corresponding C++ dispatch macro
# See AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS.
def get_all_dtypes(include_half=True,
include_bfloat16=True,
include_bool=True,
include_complex=True,
include_complex32=False,
include_qint=False,
) -> List[torch.dtype]:
dtypes = get_all_int_dtypes() + get_all_fp_dtypes(include_half=include_half, include_bfloat16=include_bfloat16)
if include_bool:
dtypes.append(torch.bool)
if include_complex:
dtypes += get_all_complex_dtypes(include_complex32)
if include_qint:
dtypes += get_all_qint_dtypes()
return dtypes
def get_all_math_dtypes(device) -> List[torch.dtype]:
return get_all_int_dtypes() + get_all_fp_dtypes(include_half=device.startswith('cuda'),
include_bfloat16=False) + get_all_complex_dtypes()
def get_all_complex_dtypes(include_complex32=False) -> List[torch.dtype]:
return [torch.complex32, torch.complex64, torch.complex128] if include_complex32 else [torch.complex64, torch.complex128]
def get_all_int_dtypes() -> List[torch.dtype]:
return [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
def get_all_fp_dtypes(include_half=True, include_bfloat16=True) -> List[torch.dtype]:
dtypes = [torch.float32, torch.float64]
if include_half:
dtypes.append(torch.float16)
if include_bfloat16:
dtypes.append(torch.bfloat16)
return dtypes
def get_all_qint_dtypes() -> List[torch.dtype]:
return [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4]
def get_all_device_types() -> List[str]:
return ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
|
pytorch-master
|
torch/testing/_legacy.py
|
import abc
import cmath
import collections.abc
import contextlib
from typing import NoReturn, Callable, Sequence, List, Union, Optional, Type, Tuple, Any, Collection
import torch
try:
import numpy as np
NUMPY_AVAILABLE = True
except ModuleNotFoundError:
NUMPY_AVAILABLE = False
class ErrorMeta(Exception):
"""Internal testing exception that makes that carries error meta data."""
def __init__(self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = ()) -> None:
super().__init__(
"If you are a user and see this message during normal operation "
"please file an issue at https://github.com/pytorch/pytorch/issues. "
"If you are a developer and working on the comparison functions, please `raise ErrorMeta().to_error()` "
"for user facing errors."
)
self.type = type
self.msg = msg
self.id = id
def to_error(self, msg: Optional[Union[str, Callable[[str], str]]] = None) -> Exception:
if not isinstance(msg, str):
generated_msg = self.msg
if self.id:
generated_msg += f"\n\nThe failure occurred for item {''.join(str([item]) for item in self.id)}"
msg = msg(generated_msg) if callable(msg) else generated_msg
return self.type(msg)
# Some analysis of tolerance by logging tests from test_torch.py can be found in
# https://github.com/pytorch/pytorch/pull/32538.
# {dtype: (rtol, atol)}
_DTYPE_PRECISIONS = {
torch.float16: (0.001, 1e-5),
torch.bfloat16: (0.016, 1e-5),
torch.float32: (1.3e-6, 1e-5),
torch.float64: (1e-7, 1e-7),
torch.complex32: (0.001, 1e-5),
torch.complex64: (1.3e-6, 1e-5),
torch.complex128: (1e-7, 1e-7),
}
# The default tolerances of torch.float32 are used for quantized dtypes, because quantized tensors are compared in
# their dequantized and floating point representation. For more details see `TensorLikePair._compare_quantized_values`
_DTYPE_PRECISIONS.update(
{
dtype: _DTYPE_PRECISIONS[torch.float32]
for dtype in (torch.quint8, torch.quint2x4, torch.quint4x2, torch.qint8, torch.qint32)
}
)
def default_tolerances(*inputs: Union[torch.Tensor, torch.dtype]) -> Tuple[float, float]:
"""Returns the default absolute and relative testing tolerances for a set of inputs based on the dtype.
See :func:`assert_close` for a table of the default tolerance for each dtype.
Returns:
(Tuple[float, float]): Loosest tolerances of all input dtypes.
"""
dtypes = []
for input in inputs:
if isinstance(input, torch.Tensor):
dtypes.append(input.dtype)
elif isinstance(input, torch.dtype):
dtypes.append(input)
else:
raise TypeError(f"Expected a torch.Tensor or a torch.dtype, but got {type(input)} instead.")
rtols, atols = zip(*[_DTYPE_PRECISIONS.get(dtype, (0.0, 0.0)) for dtype in dtypes])
return max(rtols), max(atols)
def get_tolerances(
*inputs: Union[torch.Tensor, torch.dtype], rtol: Optional[float], atol: Optional[float], id: Tuple[Any, ...] = ()
) -> Tuple[float, float]:
"""Gets absolute and relative to be used for numeric comparisons.
If both ``rtol`` and ``atol`` are specified, this is a no-op. If both are not specified, the return value of
:func:`default_tolerances` is used.
Raises:
ErrorMeta: With :class:`ValueError`, if only ``rtol`` or ``atol`` is specified.
Returns:
(Tuple[float, float]): Valid absolute and relative tolerances.
"""
if (rtol is None) ^ (atol is None):
# We require both tolerances to be omitted or specified, because specifying only one might lead to surprising
# results. Imagine setting atol=0.0 and the tensors still match because rtol>0.0.
raise ErrorMeta(
ValueError,
f"Both 'rtol' and 'atol' must be either specified or omitted, "
f"but got no {'rtol' if rtol is None else 'atol'}.",
id=id,
)
elif rtol is not None and atol is not None:
return rtol, atol
else:
return default_tolerances(*inputs)
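# Editor's note: a hedged sketch (not part of the original file) of the rule implemented
# above: ``rtol`` and ``atol`` must be given together, otherwise an ``ErrorMeta`` wrapping
# ``ValueError`` is raised; with both omitted the dtype-based defaults are returned.
def _get_tolerances_sketch() -> Tuple[Tuple[float, float], bool]:
    defaults = get_tolerances(torch.float32, rtol=None, atol=None)  # (1.3e-6, 1e-5)
    try:
        get_tolerances(torch.float32, rtol=1e-3, atol=None)
        only_one_rejected = False
    except ErrorMeta:
        only_one_rejected = True
    return defaults, only_one_rejected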
def _make_mismatch_msg(
*,
default_identifier: str,
identifier: Optional[Union[str, Callable[[str], str]]] = None,
extra: Optional[str] = None,
abs_diff: float,
abs_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None,
atol: float,
rel_diff: float,
rel_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None,
rtol: float,
) -> str:
"""Makes a mismatch error message for numeric values.
Args:
default_identifier (str): Default description of the compared values, e.g. "Tensor-likes".
identifier (Optional[Union[str, Callable[[str], str]]]): Optional identifier that overrides
``default_identifier``. Can be passed as callable in which case it will be called with
``default_identifier`` to create the description at runtime.
extra (Optional[str]): Extra information to be placed after the message header and the mismatch statistics.
abs_diff (float): Absolute difference.
abs_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the absolute difference.
atol (float): Allowed absolute tolerance. Will only be added to mismatch statistics if it or ``rtol`` are
``> 0``.
rel_diff (float): Relative difference.
rel_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the relative difference.
rtol (float): Allowed relative tolerance. Will only be added to mismatch statistics if it or ``atol`` are
``> 0``.
"""
equality = rtol == 0 and atol == 0
def make_diff_msg(*, type: str, diff: float, idx: Optional[Union[int, Tuple[int, ...]]], tol: float) -> str:
if idx is None:
msg = f"{type.title()} difference: {diff}"
else:
msg = f"Greatest {type} difference: {diff} at index {idx}"
if not equality:
msg += f" (up to {tol} allowed)"
return msg + "\n"
if identifier is None:
identifier = default_identifier
elif callable(identifier):
identifier = identifier(default_identifier)
msg = f"{identifier} are not {'equal' if equality else 'close'}!\n\n"
if extra:
msg += f"{extra.strip()}\n"
msg += make_diff_msg(type="absolute", diff=abs_diff, idx=abs_diff_idx, tol=atol)
msg += make_diff_msg(type="relative", diff=rel_diff, idx=rel_diff_idx, tol=rtol)
return msg.strip()
def make_scalar_mismatch_msg(
actual: Union[int, float, complex],
expected: Union[int, float, complex],
*,
rtol: float,
atol: float,
identifier: Optional[Union[str, Callable[[str], str]]] = None,
) -> str:
"""Makes a mismatch error message for scalars.
Args:
actual (Union[int, float, complex]): Actual scalar.
expected (Union[int, float, complex]): Expected scalar.
rtol (float): Relative tolerance.
atol (float): Absolute tolerance.
identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the scalars. Can be passed
as callable in which case it will be called by the default value to create the description at runtime.
Defaults to "Scalars".
"""
abs_diff = abs(actual - expected)
rel_diff = float("inf") if expected == 0 else abs_diff / abs(expected)
return _make_mismatch_msg(
default_identifier="Scalars",
identifier=identifier,
abs_diff=abs_diff,
atol=atol,
rel_diff=rel_diff,
rtol=rtol,
)
def make_tensor_mismatch_msg(
actual: torch.Tensor,
expected: torch.Tensor,
mismatches: torch.Tensor,
*,
rtol: float,
atol: float,
identifier: Optional[Union[str, Callable[[str], str]]] = None,
):
"""Makes a mismatch error message for tensors.
Args:
actual (torch.Tensor): Actual tensor.
expected (torch.Tensor): Expected tensor.
mismatches (torch.Tensor): Boolean mask of the same shape as ``actual`` and ``expected`` that indicates the
location of mismatches.
rtol (float): Relative tolerance.
atol (float): Absolute tolerance.
identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the tensors. Can be passed
as callable in which case it will be called by the default value to create the description at runtime.
Defaults to "Tensor-likes".
"""
def unravel_flat_index(flat_index: int) -> Tuple[int, ...]:
if not mismatches.shape:
return ()
inverse_index = []
for size in mismatches.shape[::-1]:
div, mod = divmod(flat_index, size)
flat_index = div
inverse_index.append(mod)
return tuple(inverse_index[::-1])
number_of_elements = mismatches.numel()
total_mismatches = torch.sum(mismatches).item()
extra = (
f"Mismatched elements: {total_mismatches} / {number_of_elements} "
f"({total_mismatches / number_of_elements:.1%})"
)
a_flat = actual.flatten()
b_flat = expected.flatten()
matches_flat = ~mismatches.flatten()
abs_diff = torch.abs(a_flat - b_flat)
# Ensure that only mismatches are used for the max_abs_diff computation
abs_diff[matches_flat] = 0
max_abs_diff, max_abs_diff_flat_idx = torch.max(abs_diff, 0)
rel_diff = abs_diff / torch.abs(b_flat)
# Ensure that only mismatches are used for the max_rel_diff computation
rel_diff[matches_flat] = 0
max_rel_diff, max_rel_diff_flat_idx = torch.max(rel_diff, 0)
return _make_mismatch_msg(
default_identifier="Tensor-likes",
identifier=identifier,
extra=extra,
abs_diff=max_abs_diff.item(),
abs_diff_idx=unravel_flat_index(int(max_abs_diff_flat_idx)),
atol=atol,
rel_diff=max_rel_diff.item(),
rel_diff_idx=unravel_flat_index(int(max_rel_diff_flat_idx)),
rtol=rtol,
)
class UnsupportedInputs(Exception): # noqa: B903
"""Exception to be raised during the construction of a :class:`Pair` in case it doesn't support the inputs."""
class Pair(abc.ABC):
"""ABC for all comparison pairs to be used in conjunction with :func:`assert_equal`.
Each subclass needs to overwrite :meth:`Pair.compare` that performs the actual comparison.
Each pair receives **all** options, so select the ones applicable for the subclass and forward the rest to the
super class. Raising an :class:`UnsupportedInputs` during constructions indicates that the pair is not able to
handle the inputs and the next pair type will be tried.
All other errors should be raised as :class:`ErrorMeta`. After the instantiation, :meth:`Pair._make_error_meta` can
be used to automatically handle overwriting the message with a user supplied one and id handling.
"""
def __init__(
self,
actual: Any,
expected: Any,
*,
id: Tuple[Any, ...] = (),
**unknown_parameters: Any,
) -> None:
self.actual = actual
self.expected = expected
self.id = id
self._unknown_parameters = unknown_parameters
@staticmethod
def _check_inputs_isinstance(*inputs: Any, cls: Union[Type, Tuple[Type, ...]]):
"""Checks if all inputs are instances of a given class and raise :class:`UnsupportedInputs` otherwise."""
if not all(isinstance(input, cls) for input in inputs):
raise UnsupportedInputs()
def _make_error_meta(self, type: Type[Exception], msg: str) -> ErrorMeta:
"""Makes an :class:`ErrorMeta` from a given exception type and message and the stored id.
.. warning::
Since this method uses instance attributes of :class:`Pair`, it should not be used before the
``super().__init__(...)`` call in the constructor.
"""
return ErrorMeta(type, msg, id=self.id)
@abc.abstractmethod
def compare(self) -> None:
"""Compares the inputs and returns an :class`ErrorMeta` in case they mismatch."""
def extra_repr(self) -> Sequence[Union[str, Tuple[str, Any]]]:
"""Returns extra information that will be included in the representation.
Should be overwritten by all subclasses that use additional options. The representation of the object will only
be surfaced in case we encounter an unexpected error and thus should help debug the issue. Can be a sequence of
key-value-pairs or attribute names.
"""
return []
def __repr__(self) -> str:
head = f"{type(self).__name__}("
tail = ")"
body = [
f" {name}={value!s},"
for name, value in [
("id", self.id),
("actual", self.actual),
("expected", self.expected),
*[(extra, getattr(self, extra)) if isinstance(extra, str) else extra for extra in self.extra_repr()],
]
]
return "\n".join((head, *body, *tail))
class ObjectPair(Pair):
"""Pair for any type of inputs that will be compared with the `==` operator.
.. note::
Since this will instantiate for any kind of inputs, it should only be used as fallback after all other pairs
couldn't handle the inputs.
"""
def compare(self) -> None:
try:
equal = self.actual == self.expected
except Exception as error:
raise self._make_error_meta(
ValueError, f"{self.actual} == {self.expected} failed with:\n{error}."
) from error
if not equal:
raise self._make_error_meta(AssertionError, f"{self.actual} != {self.expected}")
class NonePair(Pair):
"""Pair for ``None`` inputs."""
def __init__(self, actual: Any, expected: Any, **other_parameters: Any) -> None:
if not (actual is None or expected is None):
raise UnsupportedInputs()
super().__init__(actual, expected, **other_parameters)
def compare(self) -> None:
if not (self.actual is None and self.expected is None):
raise self._make_error_meta(AssertionError, f"None mismatch: {self.actual} is not {self.expected}")
class BooleanPair(Pair):
"""Pair for :class:`bool` inputs.
.. note::
If ``numpy`` is available, also handles :class:`numpy.bool_` inputs.
"""
def __init__(self, actual: Any, expected: Any, *, id: Tuple[Any, ...], **other_parameters: Any) -> None:
actual, expected = self._process_inputs(actual, expected, id=id)
super().__init__(actual, expected, **other_parameters)
@property
def _supported_types(self) -> Tuple[Type, ...]:
cls: List[Type] = [bool]
if NUMPY_AVAILABLE:
cls.append(np.bool_)
return tuple(cls)
def _process_inputs(self, actual: Any, expected: Any, *, id: Tuple[Any, ...]) -> Tuple[bool, bool]:
self._check_inputs_isinstance(actual, expected, cls=self._supported_types)
actual, expected = [self._to_bool(bool_like, id=id) for bool_like in (actual, expected)]
return actual, expected
def _to_bool(self, bool_like: Any, *, id: Tuple[Any, ...]) -> bool:
if isinstance(bool_like, bool):
return bool_like
elif isinstance(bool_like, np.bool_):
return bool_like.item()
else:
raise ErrorMeta(TypeError, f"Unknown boolean type {type(bool_like)}.", id=id)
def compare(self) -> None:
if self.actual is not self.expected:
raise self._make_error_meta(AssertionError, f"Booleans mismatch: {self.actual} is not {self.expected}")
class NumberPair(Pair):
"""Pair for Python number (:class:`int`, :class:`float`, and :class:`complex`) inputs.
.. note::
If ``numpy`` is available, also handles :class:`numpy.number` inputs.
Kwargs:
rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
values based on the type are selected with the below table.
atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
values based on the type are selected with the below table.
equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``.
check_dtype (bool): If ``True``, the type of the inputs will be checked for equality. Defaults to ``False``.
The following table displays correspondence between Python number type and the ``torch.dtype``'s. See
:func:`assert_close` for the corresponding tolerances.
+------------------+-------------------------------+
| ``type`` | corresponding ``torch.dtype`` |
+==================+===============================+
| :class:`int` | :attr:`~torch.int64` |
+------------------+-------------------------------+
| :class:`float` | :attr:`~torch.float64` |
+------------------+-------------------------------+
| :class:`complex` | :attr:`~torch.complex128` |
+------------------+-------------------------------+
"""
_TYPE_TO_DTYPE = {
int: torch.int64,
float: torch.float64,
complex: torch.complex128,
}
_NUMBER_TYPES = tuple(_TYPE_TO_DTYPE.keys())
def __init__(
self,
actual: Any,
expected: Any,
*,
id: Tuple[Any, ...] = (),
rtol: Optional[float] = None,
atol: Optional[float] = None,
equal_nan: bool = False,
check_dtype: bool = False,
**other_parameters: Any,
) -> None:
actual, expected = self._process_inputs(actual, expected, id=id)
super().__init__(actual, expected, id=id, **other_parameters)
self.rtol, self.atol = get_tolerances(
*[self._TYPE_TO_DTYPE[type(input)] for input in (actual, expected)], rtol=rtol, atol=atol, id=id
)
self.equal_nan = equal_nan
self.check_dtype = check_dtype
@property
def _supported_types(self) -> Tuple[Type, ...]:
cls = list(self._NUMBER_TYPES)
if NUMPY_AVAILABLE:
cls.append(np.number)
return tuple(cls)
def _process_inputs(
self, actual: Any, expected: Any, *, id: Tuple[Any, ...]
) -> Tuple[Union[int, float, complex], Union[int, float, complex]]:
self._check_inputs_isinstance(actual, expected, cls=self._supported_types)
actual, expected = [self._to_number(number_like, id=id) for number_like in (actual, expected)]
return actual, expected
def _to_number(self, number_like: Any, *, id: Tuple[Any, ...]) -> Union[int, float, complex]:
if NUMPY_AVAILABLE and isinstance(number_like, np.number):
return number_like.item()
elif isinstance(number_like, self._NUMBER_TYPES):
return number_like
else:
raise ErrorMeta(TypeError, f"Unknown number type {type(number_like)}.", id=id)
def compare(self) -> None:
if self.check_dtype and type(self.actual) is not type(self.expected):
raise self._make_error_meta(
AssertionError,
f"The (d)types do not match: {type(self.actual)} != {type(self.expected)}.",
)
if self.actual == self.expected:
return
if self.equal_nan and cmath.isnan(self.actual) and cmath.isnan(self.expected):
return
abs_diff = abs(self.actual - self.expected)
tolerance = self.atol + self.rtol * abs(self.expected)
if cmath.isfinite(abs_diff) and abs_diff <= tolerance:
return
raise self._make_error_meta(
AssertionError, make_scalar_mismatch_msg(self.actual, self.expected, rtol=self.rtol, atol=self.atol)
)
def extra_repr(self) -> Sequence[str]:
return (
"rtol",
"atol",
"equal_nan",
"check_dtype",
)
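# Editor's note: a hedged usage sketch (not part of the original file). ``NumberPair``
# accepts plain Python numbers, derives dtype-based default tolerances, and its
# ``compare()`` raises an ``ErrorMeta`` (carrying ``AssertionError``) on a mismatch.
def _number_pair_sketch() -> bool:
    pair = NumberPair(1.0, 1.0 + 1e-9)  # well within the float64 default tolerances
    try:
        pair.compare()
        return True
    except ErrorMeta:
        return False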
class TensorLikePair(Pair):
"""Pair for :class:`torch.Tensor`-like inputs.
Kwargs:
allow_subclasses (bool): If ``True`` (default), inputs that are directly related (one being an instance of the other's type) are accepted; if ``False``, the types of ``actual`` and ``expected`` must match exactly.
rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default
values based on the type are selected. See :func:assert_close: for details.
atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default
values based on the type are selected. See :func:assert_close: for details.
equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``.
check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
:attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
:attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
:func:`torch.promote_types`) before being compared.
check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
compared.
check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
check_is_coalesced (bool): If ``True`` (default) and corresponding tensors are sparse COO, checks that both
``actual`` and ``expected`` are either coalesced or uncoalesced. If this check is disabled, tensors are
:meth:`~torch.Tensor.coalesce`'ed before being compared.
"""
def __init__(
self,
actual: Any,
expected: Any,
*,
id: Tuple[Any, ...] = (),
allow_subclasses: bool = True,
rtol: Optional[float] = None,
atol: Optional[float] = None,
equal_nan: bool = False,
check_device: bool = True,
check_dtype: bool = True,
check_layout: bool = True,
check_stride: bool = False,
check_is_coalesced: bool = True,
**other_parameters: Any,
):
actual, expected = self._process_inputs(actual, expected, id=id, allow_subclasses=allow_subclasses)
super().__init__(actual, expected, id=id, **other_parameters)
self.rtol, self.atol = get_tolerances(actual, expected, rtol=rtol, atol=atol, id=self.id)
self.equal_nan = equal_nan
self.check_device = check_device
self.check_dtype = check_dtype
self.check_layout = check_layout
self.check_stride = check_stride
self.check_is_coalesced = check_is_coalesced
def _process_inputs(
self, actual: Any, expected: Any, *, id: Tuple[Any, ...], allow_subclasses: bool
) -> Tuple[torch.Tensor, torch.Tensor]:
directly_related = isinstance(actual, type(expected)) or isinstance(expected, type(actual))
if not directly_related:
raise UnsupportedInputs()
if not allow_subclasses and type(actual) is not type(expected):
raise UnsupportedInputs()
actual, expected = [self._to_tensor(input) for input in (actual, expected)]
for tensor in (actual, expected):
self._check_supported(tensor, id=id)
return actual, expected
def _to_tensor(self, tensor_like: Any) -> torch.Tensor:
if isinstance(tensor_like, torch.Tensor):
return tensor_like
try:
return torch.as_tensor(tensor_like)
except Exception:
raise UnsupportedInputs()
def _check_supported(self, tensor: torch.Tensor, *, id: Tuple[Any, ...]) -> None:
if tensor.layout not in {torch.strided,
torch.sparse_coo,
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc}:
raise ErrorMeta(ValueError, f"Unsupported tensor layout {tensor.layout}", id=id)
def compare(self) -> None:
actual, expected = self.actual, self.expected
self._compare_attributes(actual, expected)
if any(input.device.type == "meta" for input in (actual, expected)):
return
actual, expected = self._equalize_attributes(actual, expected)
self._compare_values(actual, expected)
def _compare_attributes(
self,
actual: torch.Tensor,
expected: torch.Tensor,
) -> None:
"""Checks if the attributes of two tensors match.
Always checks
- the :attr:`~torch.Tensor.shape`,
- whether both inputs are quantized or not,
- and if they use the same quantization scheme.
Checks for
- :attr:`~torch.Tensor.layout`,
- :meth:`~torch.Tensor.stride`,
- :attr:`~torch.Tensor.device`, and
- :attr:`~torch.Tensor.dtype`
are optional and can be disabled through the corresponding ``check_*`` flag during construction of the pair.
"""
def raise_mismatch_error(attribute_name: str, actual_value: Any, expected_value: Any) -> NoReturn:
raise self._make_error_meta(
AssertionError,
f"The values for attribute '{attribute_name}' do not match: {actual_value} != {expected_value}.",
)
if actual.shape != expected.shape:
raise_mismatch_error("shape", actual.shape, expected.shape)
if actual.is_quantized != expected.is_quantized:
raise_mismatch_error("is_quantized", actual.is_quantized, expected.is_quantized)
elif actual.is_quantized and actual.qscheme() != expected.qscheme():
raise_mismatch_error("qscheme()", actual.qscheme(), expected.qscheme())
if actual.layout != expected.layout:
if self.check_layout:
raise_mismatch_error("layout", actual.layout, expected.layout)
elif actual.layout == torch.strided and self.check_stride and actual.stride() != expected.stride():
raise_mismatch_error("stride()", actual.stride(), expected.stride())
if self.check_device and actual.device != expected.device:
raise_mismatch_error("device", actual.device, expected.device)
if self.check_dtype and actual.dtype != expected.dtype:
raise_mismatch_error("dtype", actual.dtype, expected.dtype)
def _equalize_attributes(self, actual: torch.Tensor, expected: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Equalizes some attributes of two tensors for value comparison.
If ``actual`` and ``expected`` are ...
- ... not on the same :attr:`~torch.Tensor.device`, they are moved to CPU memory.
- ... not of the same ``dtype``, they are promoted to a common ``dtype`` (according to
:func:`torch.promote_types`).
- ... not of the same ``layout``, they are converted to strided tensors.
Args:
actual (Tensor): Actual tensor.
expected (Tensor): Expected tensor.
Returns:
(Tuple[Tensor, Tensor]): Equalized tensors.
"""
# The comparison logic uses operators currently not supported by the MPS backends.
# See https://github.com/pytorch/pytorch/issues/77144 for details.
# TODO: Remove this conversion as soon as all operations are supported natively by the MPS backend
if actual.is_mps or expected.is_mps: # type: ignore[attr-defined]
actual = actual.cpu()
expected = expected.cpu()
if actual.device != expected.device:
actual = actual.cpu()
expected = expected.cpu()
if actual.dtype != expected.dtype:
dtype = torch.promote_types(actual.dtype, expected.dtype)
actual = actual.to(dtype)
expected = expected.to(dtype)
if actual.layout != expected.layout:
# These checks are needed, since Tensor.to_dense() fails on tensors that are already strided
actual = actual.to_dense() if actual.layout != torch.strided else actual
expected = expected.to_dense() if expected.layout != torch.strided else expected
return actual, expected
def _compare_values(self, actual: torch.Tensor, expected: torch.Tensor) -> None:
if actual.is_quantized:
compare_fn = self._compare_quantized_values
elif actual.is_sparse:
compare_fn = self._compare_sparse_coo_values
elif actual.layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc}:
compare_fn = self._compare_sparse_compressed_values
else:
compare_fn = self._compare_regular_values_close
compare_fn(actual, expected, rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan)
def _compare_quantized_values(
self, actual: torch.Tensor, expected: torch.Tensor, *, rtol: float, atol: float, equal_nan: bool
) -> None:
"""Compares quantized tensors by comparing the :meth:`~torch.Tensor.dequantize`'d variants for closeness.
.. note::
A detailed discussion about why only the dequantized variant is checked for closeness rather than checking
the individual quantization parameters for closeness and the integer representation for equality can be
found in https://github.com/pytorch/pytorch/issues/68548.
"""
return self._compare_regular_values_close(
actual.dequantize(),
expected.dequantize(),
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
identifier=lambda default_identifier: f"Quantized {default_identifier.lower()}",
)
def _compare_sparse_coo_values(
self, actual: torch.Tensor, expected: torch.Tensor, *, rtol: float, atol: float, equal_nan: bool
) -> None:
"""Compares sparse COO tensors by comparing
- the number of sparse dimensions,
- the number of non-zero elements (nnz) for equality,
- the indices for equality, and
- the values for closeness.
"""
if actual.sparse_dim() != expected.sparse_dim():
raise self._make_error_meta(
AssertionError,
(
f"The number of sparse dimensions in sparse COO tensors does not match: "
f"{actual.sparse_dim()} != {expected.sparse_dim()}"
),
)
if actual._nnz() != expected._nnz():
raise self._make_error_meta(
AssertionError,
(
f"The number of specified values in sparse COO tensors does not match: "
f"{actual._nnz()} != {expected._nnz()}"
),
)
self._compare_regular_values_equal(
actual._indices(),
expected._indices(),
identifier="Sparse COO indices",
)
self._compare_regular_values_close(
actual._values(),
expected._values(),
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
identifier="Sparse COO values",
)
def _compare_sparse_compressed_values(
self, actual: torch.Tensor, expected: torch.Tensor, *, rtol: float, atol: float, equal_nan: bool
) -> None:
"""Compares sparse compressed tensors by comparing
- the number of non-zero elements (nnz) for equality,
- the plain indices for equality,
- the compressed indices for equality, and
- the values for closeness.
"""
format_name, compressed_indices_method, plain_indices_method = {
torch.sparse_csr: ('CSR', torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: ('CSC', torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: ('BSR', torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: ('BSC', torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}[actual.layout]
if actual._nnz() != expected._nnz():
raise self._make_error_meta(
AssertionError,
(
f"The number of specified values in sparse {format_name} tensors does not match: "
f"{actual._nnz()} != {expected._nnz()}"
),
)
self._compare_regular_values_equal(
compressed_indices_method(actual),
compressed_indices_method(expected),
identifier=f"Sparse {format_name} {compressed_indices_method.__name__}",
)
self._compare_regular_values_equal(
plain_indices_method(actual),
plain_indices_method(expected),
identifier=f"Sparse {format_name} {plain_indices_method.__name__}",
)
self._compare_regular_values_close(
actual.values(),
expected.values(),
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
identifier=f"Sparse {format_name} values",
)
def _compare_regular_values_equal(
self,
actual: torch.Tensor,
expected: torch.Tensor,
*,
equal_nan: bool = False,
identifier: Optional[Union[str, Callable[[str], str]]] = None,
) -> None:
"""Checks if the values of two tensors are equal."""
self._compare_regular_values_close(actual, expected, rtol=0, atol=0, equal_nan=equal_nan, identifier=identifier)
def _compare_regular_values_close(
self,
actual: torch.Tensor,
expected: torch.Tensor,
*,
rtol: float,
atol: float,
equal_nan: bool,
identifier: Optional[Union[str, Callable[[str], str]]] = None,
) -> None:
"""Checks if the values of two tensors are close up to a desired tolerance."""
actual, expected = self._promote_for_comparison(actual, expected)
matches = torch.isclose(actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan)
if torch.all(matches):
return
if actual.shape == torch.Size([]):
msg = make_scalar_mismatch_msg(actual.item(), expected.item(), rtol=rtol, atol=atol, identifier=identifier)
else:
msg = make_tensor_mismatch_msg(actual, expected, ~matches, rtol=rtol, atol=atol, identifier=identifier)
raise self._make_error_meta(AssertionError, msg)
def _promote_for_comparison(
self, actual: torch.Tensor, expected: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Promotes the inputs to the comparison dtype based on the input dtype.
Returns:
Inputs promoted to the highest precision dtype of the same dtype category. :class:`torch.bool` is treated
as an integral dtype.
"""
# This is called after self._equalize_attributes() and thus `actual` and `expected` already have the same dtype.
if actual.dtype.is_complex:
dtype = torch.complex128
elif actual.dtype.is_floating_point:
dtype = torch.float64
else:
dtype = torch.int64
return actual.to(dtype), expected.to(dtype)
def extra_repr(self) -> Sequence[str]:
return (
"rtol",
"atol",
"equal_nan",
"check_device",
"check_dtype",
"check_layout",
"check_stride",
"check_is_coalesced",
)
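# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module: the closeness check in
# TensorLikePair._compare_regular_values_close reduces to ``torch.isclose``
# after both tensors are promoted to a common high-precision dtype. The helper
# below is a hypothetical addition for illustration only (the name
# ``_demo_closeness_criterion`` is an assumption) and is never called at
# import time.
def _demo_closeness_criterion() -> None:
    actual = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float32)
    expected = actual + 1e-6  # within the default float32 tolerances
    rtol, atol = 1.3e-6, 1e-5  # default float32 tolerances used by assert_close

    # Mirror _promote_for_comparison: floating-point inputs are compared in float64.
    matches = torch.isclose(
        actual.to(torch.float64), expected.to(torch.float64), rtol=rtol, atol=atol
    )
    assert bool(torch.all(matches))

    # The same inputs also pass through the public entry point defined below.
    assert_close(actual, expected)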
def originate_pairs(
actual: Any,
expected: Any,
*,
pair_types: Sequence[Type[Pair]],
sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,),
mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,),
id: Tuple[Any, ...] = (),
**options: Any,
) -> List[Pair]:
"""Originates pairs from the individual inputs.
``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
:class:`~collections.abc.Mapping`'s. In this case the pairs are originated by recursing through them.
Args:
actual (Any): Actual input.
expected (Any): Expected input.
pair_types (Sequence[Type[Pair]]): Sequence of pair types that will be tried to construct with the inputs.
First successful pair will be used.
sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
id (Tuple[Any, ...]): Optional id of a pair that will be included in an error message.
**options (Any): Options passed to each pair during construction.
Raises:
ErrorMeta: With :class:`AssertionError`, if the inputs are :class:`~collections.abc.Sequence`'s, but their
lengths do not match.
ErrorMeta: With :class:`AssertionError`, if the inputs are :class:`~collections.abc.Mapping`'s, but their sets of
keys do not match.
ErrorMeta: With :class:`TypeError`, if no pair is able to handle the inputs.
ErrorMeta: With any expected exception that happens during the construction of a pair.
Returns:
(List[Pair]): Originated pairs.
"""
# We explicitly exclude str's here since they are self-referential and would cause an infinite recursion loop:
# "a" == "a"[0][0]...
if (
isinstance(actual, sequence_types)
and not isinstance(actual, str)
and isinstance(expected, sequence_types)
and not isinstance(expected, str)
):
actual_len = len(actual)
expected_len = len(expected)
if actual_len != expected_len:
raise ErrorMeta(
AssertionError, f"The length of the sequences mismatch: {actual_len} != {expected_len}", id=id
)
pairs = []
for idx in range(actual_len):
pairs.extend(
originate_pairs(
actual[idx],
expected[idx],
pair_types=pair_types,
sequence_types=sequence_types,
mapping_types=mapping_types,
id=(*id, idx),
**options,
)
)
return pairs
elif isinstance(actual, mapping_types) and isinstance(expected, mapping_types):
actual_keys = set(actual.keys())
expected_keys = set(expected.keys())
if actual_keys != expected_keys:
missing_keys = expected_keys - actual_keys
additional_keys = actual_keys - expected_keys
raise ErrorMeta(
AssertionError,
(
f"The keys of the mappings do not match:\n"
f"Missing keys in the actual mapping: {sorted(missing_keys)}\n"
f"Additional keys in the actual mapping: {sorted(additional_keys)}"
),
id=id,
)
keys: Collection = actual_keys
# Since the origination aborts after the first failure, we try to be deterministic
with contextlib.suppress(Exception):
keys = sorted(keys)
pairs = []
for key in keys:
pairs.extend(
originate_pairs(
actual[key],
expected[key],
pair_types=pair_types,
sequence_types=sequence_types,
mapping_types=mapping_types,
id=(*id, key),
**options,
)
)
return pairs
else:
for pair_type in pair_types:
try:
return [pair_type(actual, expected, id=id, **options)]
# Raising an `UnsupportedInputs` during origination indicates that the pair type is not able to handle the
# inputs. Thus, we try the next pair type.
except UnsupportedInputs:
continue
# Raising an `ErrorMeta` during origination is the orderly way to abort and so we simply re-raise it. This
# is only in a separate branch, because the one below would also except it.
except ErrorMeta:
raise
# Raising any other exception during origination is unexpected and will give some extra information about
# what happened. If applicable, the exception should be expected in the future.
except Exception as error:
raise RuntimeError(
f"Originating a {pair_type.__name__}() at item {''.join(str([item]) for item in id)} with\n\n"
f"{type(actual).__name__}(): {actual}\n\n"
f"and\n\n"
f"{type(expected).__name__}(): {expected}\n\n"
f"resulted in the unexpected exception above. "
f"If you are a user and see this message during normal operation "
"please file an issue at https://github.com/pytorch/pytorch/issues. "
"If you are a developer and working on the comparison functions, "
"please except the previous error and raise an expressive `ErrorMeta` instead."
) from error
else:
raise ErrorMeta(
TypeError,
f"No comparison pair was able to handle inputs of type {type(actual)} and {type(expected)}.",
id=id,
)
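# Illustrative sketch, not part of the upstream module: ``originate_pairs``
# walks nested sequences and mappings and builds one Pair per leaf, recording
# the path to each leaf in ``id``. The hypothetical helper below is never
# called at import time; it forwards the same options that ``assert_close``
# passes through to the pair constructors.
def _demo_originate_pairs() -> None:
    actual = {"a": [torch.tensor(1.0), torch.tensor(2.0)]}
    expected = {"a": [torch.tensor(1.0), torch.tensor(2.0)]}
    pairs = originate_pairs(
        actual,
        expected,
        pair_types=(TensorLikePair,),
        allow_subclasses=True,
        rtol=None,
        atol=None,
        equal_nan=False,
        check_device=True,
        check_dtype=True,
        check_layout=True,
        check_stride=False,
    )
    # One TensorLikePair per leaf tensor, with the path to the leaf as its id.
    assert len(pairs) == 2
    assert all(isinstance(pair, TensorLikePair) for pair in pairs)
    assert pairs[0].id == ("a", 0) and pairs[1].id == ("a", 1)
    for pair in pairs:
        pair.compare()  # raises ErrorMeta on mismatch; passes here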
def assert_equal(
actual: Any,
expected: Any,
*,
pair_types: Sequence[Type[Pair]] = (ObjectPair,),
sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,),
mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,),
msg: Optional[Union[str, Callable[[str], str]]] = None,
**options: Any,
) -> None:
"""Asserts that inputs are equal.
``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or
:class:`~collections.abc.Mapping`'s. In this case the comparison happens elementwise by recursing through them.
Args:
actual (Any): Actual input.
expected (Any): Expected input.
pair_types (Sequence[Type[Pair]]): Sequence of :class:`Pair` types that will be tried to construct with the
inputs. First successful pair will be used. Defaults to only using :class:`ObjectPair`.
sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise.
mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise.
**options (Any): Options passed to each pair during construction.
"""
# Hide this function from `pytest`'s traceback
__tracebackhide__ = True
try:
pairs = originate_pairs(
actual,
expected,
pair_types=pair_types,
sequence_types=sequence_types,
mapping_types=mapping_types,
**options,
)
except ErrorMeta as error_meta:
# Explicitly raising from None to hide the internal traceback
raise error_meta.to_error() from None
error_metas: List[ErrorMeta] = []
for pair in pairs:
try:
pair.compare()
except ErrorMeta as error_meta:
error_metas.append(error_meta)
# Raising any exception besides `ErrorMeta` while comparing is unexpected and will give some extra information
# about what happened. If applicable, the exception should be expected in the future.
except Exception as error:
raise RuntimeError(
f"Comparing\n\n"
f"{pair}\n\n"
f"resulted in the unexpected exception above. "
f"If you are a user and see this message during normal operation "
"please file an issue at https://github.com/pytorch/pytorch/issues. "
"If you are a developer and working on the comparison functions, "
"please except the previous error and raise an expressive `ErrorMeta` instead."
) from error
if not error_metas:
return
# TODO: compose all metas into one AssertionError
raise error_metas[0].to_error(msg)
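# Illustrative sketch, not part of the upstream module: ``assert_equal`` is the
# generic driver that ``assert_close`` below wraps. A hypothetical strict,
# zero-tolerance check can be built by forwarding the same pair types with
# ``rtol=0`` and ``atol=0``; the helper below is for demonstration only and is
# never called at import time.
def _demo_assert_equal_driver() -> None:
    state = {"weights": torch.ones(2), "bias": 0.0, "frozen": True}
    assert_equal(
        state,
        dict(state),
        pair_types=(NonePair, BooleanPair, NumberPair, TensorLikePair),
        allow_subclasses=True,
        rtol=0,
        atol=0,
        equal_nan=False,
        check_device=True,
        check_dtype=True,
        check_layout=True,
        check_stride=False,
    )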
def assert_close(
actual: Any,
expected: Any,
*,
allow_subclasses: bool = True,
rtol: Optional[float] = None,
atol: Optional[float] = None,
equal_nan: bool = False,
check_device: bool = True,
check_dtype: bool = True,
check_layout: bool = True,
check_stride: bool = False,
msg: Optional[Union[str, Callable[[str], str]]] = None,
):
r"""Asserts that ``actual`` and ``expected`` are close.
If ``actual`` and ``expected`` are strided, non-quantized, real-valued, and finite, they are considered close if
.. math::
\lvert \text{actual} - \text{expected} \rvert \le \texttt{atol} + \texttt{rtol} \cdot \lvert \text{expected} \rvert
Non-finite values (``-inf`` and ``inf``) are considered close if and only if they are equal. ``NaN``'s are
only considered equal to each other if ``equal_nan`` is ``True``.
In addition, they are only considered close if they have the same
- :attr:`~torch.Tensor.device` (if ``check_device`` is ``True``),
- ``dtype`` (if ``check_dtype`` is ``True``),
- ``layout`` (if ``check_layout`` is ``True``), and
- stride (if ``check_stride`` is ``True``).
If either ``actual`` or ``expected`` is a meta tensor, only the attribute checks will be performed.
If ``actual`` and ``expected`` are sparse (either having COO, CSR, CSC, BSR, or BSC layout), their strided members are
checked individually. Indices, namely ``indices`` for COO, ``crow_indices`` and ``col_indices`` for CSR and BSR,
or ``ccol_indices`` and ``row_indices`` for CSC and BSC layouts, respectively,
are always checked for equality whereas the values are checked for closeness according to the definition above.
If ``actual`` and ``expected`` are quantized, they are considered close if they have the same
:meth:`~torch.Tensor.qscheme` and the result of :meth:`~torch.Tensor.dequantize` is close according to the
definition above.
``actual`` and ``expected`` can be :class:`~torch.Tensor`'s or any tensor-or-scalar-likes from which
:class:`torch.Tensor`'s can be constructed with :func:`torch.as_tensor`. Except for Python scalars the input types
have to be directly related. In addition, ``actual`` and ``expected`` can be :class:`~collections.abc.Sequence`'s
or :class:`~collections.abc.Mapping`'s in which case they are considered close if their structure matches and all
their elements are considered close according to the above definition.
.. note::
Python scalars are an exception to the type relation requirement, because their :func:`type`, i.e.
:class:`int`, :class:`float`, and :class:`complex`, is equivalent to the ``dtype`` of a tensor-like. Thus,
Python scalars of different types can be checked, but require ``check_dtype=False``.
Args:
actual (Any): Actual input.
expected (Any): Expected input.
allow_subclasses (bool): If ``True`` (default) and except for Python scalars, inputs of directly related types
are allowed. Otherwise type equality is required.
rtol (Optional[float]): Relative tolerance. If specified, ``atol`` must also be specified. If omitted, default
values based on the :attr:`~torch.Tensor.dtype` are selected from the table below.
atol (Optional[float]): Absolute tolerance. If specified, ``rtol`` must also be specified. If omitted, default
values based on the :attr:`~torch.Tensor.dtype` are selected from the table below.
equal_nan (bool): If ``True``, two ``NaN`` values will be considered equal.
check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same
:attr:`~torch.Tensor.device`. If this check is disabled, tensors on different
:attr:`~torch.Tensor.device`'s are moved to the CPU before being compared.
check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this
check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to
:func:`torch.promote_types`) before being compared.
check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this
check is disabled, tensors with different ``layout``'s are converted to strided tensors before being
compared.
check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride.
msg (Optional[Union[str, Callable[[str], str]]]): Optional error message to use in case a failure occurs during
the comparison. Can also be passed as a callable, in which case it will be called with the generated message and
should return the new message.
Raises:
ValueError: If no :class:`torch.Tensor` can be constructed from an input.
ValueError: If only ``rtol`` or ``atol`` is specified.
AssertionError: If corresponding inputs are not Python scalars and are not directly related.
AssertionError: If ``allow_subclasses`` is ``False``, but corresponding inputs are not Python scalars and have
different types.
AssertionError: If the inputs are :class:`~collections.abc.Sequence`'s, but their lengths do not match.
AssertionError: If the inputs are :class:`~collections.abc.Mapping`'s, but their sets of keys do not match.
AssertionError: If corresponding tensors do not have the same :attr:`~torch.Tensor.shape`.
AssertionError: If ``check_layout`` is ``True``, but corresponding tensors do not have the same
:attr:`~torch.Tensor.layout`.
AssertionError: If only one of corresponding tensors is quantized.
AssertionError: If corresponding tensors are quantized, but have different :meth:`~torch.Tensor.qscheme`'s.
AssertionError: If ``check_device`` is ``True``, but corresponding tensors are not on the same
:attr:`~torch.Tensor.device`.
AssertionError: If ``check_dtype`` is ``True``, but corresponding tensors do not have the same ``dtype``.
AssertionError: If ``check_stride`` is ``True``, but corresponding strided tensors do not have the same stride.
AssertionError: If the values of corresponding tensors are not close according to the definition above.
The following table displays the default ``rtol`` and ``atol`` for different ``dtype``'s. In case of mismatching
``dtype``'s, the maximum of both tolerances is used.
+---------------------------+------------+----------+
| ``dtype`` | ``rtol`` | ``atol`` |
+===========================+============+==========+
| :attr:`~torch.float16` | ``1e-3`` | ``1e-5`` |
+---------------------------+------------+----------+
| :attr:`~torch.bfloat16` | ``1.6e-2`` | ``1e-5`` |
+---------------------------+------------+----------+
| :attr:`~torch.float32` | ``1.3e-6`` | ``1e-5`` |
+---------------------------+------------+----------+
| :attr:`~torch.float64` | ``1e-7`` | ``1e-7`` |
+---------------------------+------------+----------+
| :attr:`~torch.complex32` | ``1e-3`` | ``1e-5`` |
+---------------------------+------------+----------+
| :attr:`~torch.complex64` | ``1.3e-6`` | ``1e-5`` |
+---------------------------+------------+----------+
| :attr:`~torch.complex128` | ``1e-7`` | ``1e-7`` |
+---------------------------+------------+----------+
| :attr:`~torch.quint8` | ``1.3e-6`` | ``1e-5`` |
+---------------------------+------------+----------+
| :attr:`~torch.quint2x4` | ``1.3e-6`` | ``1e-5`` |
+---------------------------+------------+----------+
| :attr:`~torch.quint4x2` | ``1.3e-6`` | ``1e-5`` |
+---------------------------+------------+----------+
| :attr:`~torch.qint8` | ``1.3e-6`` | ``1e-5`` |
+---------------------------+------------+----------+
| :attr:`~torch.qint32` | ``1.3e-6`` | ``1e-5`` |
+---------------------------+------------+----------+
| other | ``0.0`` | ``0.0`` |
+---------------------------+------------+----------+
.. note::
:func:`~torch.testing.assert_close` is highly configurable with strict default settings. Users are encouraged
to :func:`~functools.partial` it to fit their use case. For example, if an equality check is needed, one might
define an ``assert_equal`` that uses zero tolerances for every ``dtype`` by default:
>>> import functools
>>> assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
>>> assert_equal(1e-9, 1e-10)
Traceback (most recent call last):
...
AssertionError: Scalars are not equal!
<BLANKLINE>
Absolute difference: 9.000000000000001e-10
Relative difference: 9.0
Examples:
>>> # tensor to tensor comparison
>>> expected = torch.tensor([1e0, 1e-1, 1e-2])
>>> actual = torch.acos(torch.cos(expected))
>>> torch.testing.assert_close(actual, expected)
>>> # scalar to scalar comparison
>>> import math
>>> expected = math.sqrt(2.0)
>>> actual = 2.0 / math.sqrt(2.0)
>>> torch.testing.assert_close(actual, expected)
>>> # numpy array to numpy array comparison
>>> import numpy as np
>>> expected = np.array([1e0, 1e-1, 1e-2])
>>> actual = np.arccos(np.cos(expected))
>>> torch.testing.assert_close(actual, expected)
>>> # sequence to sequence comparison
>>> import numpy as np
>>> # The types of the sequences do not have to match. They only have to have the same
>>> # length and their elements have to match.
>>> expected = [torch.tensor([1.0]), 2.0, np.array(3.0)]
>>> actual = tuple(expected)
>>> torch.testing.assert_close(actual, expected)
>>> # mapping to mapping comparison
>>> from collections import OrderedDict
>>> import numpy as np
>>> foo = torch.tensor(1.0)
>>> bar = 2.0
>>> baz = np.array(3.0)
>>> # The types and a possible ordering of mappings do not have to match. They only
>>> # have to have the same set of keys and their elements have to match.
>>> expected = OrderedDict([("foo", foo), ("bar", bar), ("baz", baz)])
>>> actual = {"baz": baz, "bar": bar, "foo": foo}
>>> torch.testing.assert_close(actual, expected)
>>> expected = torch.tensor([1.0, 2.0, 3.0])
>>> actual = expected.clone()
>>> # By default, directly related instances can be compared
>>> torch.testing.assert_close(torch.nn.Parameter(actual), expected)
>>> # This check can be made more strict with allow_subclasses=False
>>> torch.testing.assert_close(
... torch.nn.Parameter(actual), expected, allow_subclasses=False
... )
Traceback (most recent call last):
...
TypeError: No comparison pair was able to handle inputs of type
<class 'torch.nn.parameter.Parameter'> and <class 'torch.Tensor'>.
>>> # If the inputs are not directly related, they are never considered close
>>> torch.testing.assert_close(actual.numpy(), expected)
Traceback (most recent call last):
...
TypeError: No comparison pair was able to handle inputs of type <class 'numpy.ndarray'>
and <class 'torch.Tensor'>.
>>> # Exceptions to these rules are Python scalars. They can be checked regardless of
>>> # their type if check_dtype=False.
>>> torch.testing.assert_close(1.0, 1, check_dtype=False)
>>> # NaN != NaN by default.
>>> expected = torch.tensor(float("Nan"))
>>> actual = expected.clone()
>>> torch.testing.assert_close(actual, expected)
Traceback (most recent call last):
...
AssertionError: Scalars are not close!
<BLANKLINE>
Absolute difference: nan (up to 1e-05 allowed)
Relative difference: nan (up to 1.3e-06 allowed)
>>> torch.testing.assert_close(actual, expected, equal_nan=True)
>>> expected = torch.tensor([1.0, 2.0, 3.0])
>>> actual = torch.tensor([1.0, 4.0, 5.0])
>>> # The default error message can be overwritten.
>>> torch.testing.assert_close(actual, expected, msg="Argh, the tensors are not close!")
Traceback (most recent call last):
...
AssertionError: Argh, the tensors are not close!
>>> # If msg is a callable, it can be used to augment the generated message with
>>> # extra information
>>> torch.testing.assert_close(
... actual, expected, msg=lambda msg: f"Header\n\n{msg}\n\nFooter"
... )
Traceback (most recent call last):
...
AssertionError: Header
<BLANKLINE>
Tensor-likes are not close!
<BLANKLINE>
Mismatched elements: 2 / 3 (66.7%)
Greatest absolute difference: 2.0 at index (1,) (up to 1e-05 allowed)
Greatest relative difference: 1.0 at index (1,) (up to 1.3e-06 allowed)
<BLANKLINE>
Footer
"""
# Hide this function from `pytest`'s traceback
__tracebackhide__ = True
assert_equal(
actual,
expected,
pair_types=(
NonePair,
BooleanPair,
NumberPair,
TensorLikePair,
),
allow_subclasses=allow_subclasses,
rtol=rtol,
atol=atol,
equal_nan=equal_nan,
check_device=check_device,
check_dtype=check_dtype,
check_layout=check_layout,
check_stride=check_stride,
msg=msg,
)
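# Illustrative sketch, not part of the upstream module: ``assert_close`` also
# exercises the sparse branches of TensorLikePair. The hypothetical helper
# below (never called at import time) builds two equivalent sparse COO tensors;
# per ``_compare_sparse_coo_values`` their indices are compared for equality
# and their values for closeness.
def _demo_assert_close_sparse_coo() -> None:
    indices = torch.tensor([[0, 1], [1, 0]])
    values = torch.tensor([1.0, 2.0])
    expected = torch.sparse_coo_tensor(indices, values, size=(2, 2)).coalesce()
    actual = torch.sparse_coo_tensor(indices, values + 1e-7, size=(2, 2)).coalesce()
    # The value difference of 1e-7 is within the default float32 tolerances.
    assert_close(actual, expected)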
|
pytorch-master
|
torch/testing/_comparison.py
|