Returns imaginary parts of all elements in `a`. Uses `tf.imag`. Args: a: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. Returns: An ndarray with the same shape as `a`.
def imag(a): """Returns imaginary parts of all elements in `a`. Uses `tf.imag`. Args: a: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. Returns: An ndarray with the same shape as `a`. """ a = asarray(a) # TODO(srbs): np.imag returns a scalar if a is a scalar, whereas we always # return an ndarray. return utils.tensor_to_ndarray(tf.math.imag(a.data))
A general reduction function. Args: tf_fn: the TF reduction function. a: the array to be reduced. axis: (optional) the axis along which to do the reduction. If None, all dimensions are reduced. dtype: (optional) the dtype of the result. keepdims: (optional) whether to keep the reduced dimension(s). promote_int: how to promote integer and bool inputs. There are three choices: (1) _TO_INT64: always promote them to int64 or uint64; (2) _TO_FLOAT: always promote them to a float type (determined by dtypes.default_float_type); (3) None: don't promote. tf_bool_fn: (optional) the TF reduction function for bool inputs. It will only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s dtype is `np.bool_` and `preserve_bool` is True. preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype is `np.bool_` (some reductions such as np.sum convert bools to integers, while others such as np.max preserve bools). Returns: An ndarray.
def _reduce(tf_fn, a, axis=None, dtype=None, keepdims=None, promote_int=_TO_INT64, tf_bool_fn=None, preserve_bool=False): """A general reduction function. Args: tf_fn: the TF reduction function. a: the array to be reduced. axis: (optional) the axis along which to do the reduction. If None, all dimensions are reduced. dtype: (optional) the dtype of the result. keepdims: (optional) whether to keep the reduced dimension(s). promote_int: how to promote integer and bool inputs. There are three choices: (1) _TO_INT64: always promote them to int64 or uint64; (2) _TO_FLOAT: always promote them to a float type (determined by dtypes.default_float_type); (3) None: don't promote. tf_bool_fn: (optional) the TF reduction function for bool inputs. It will only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s dtype is `np.bool_` and `preserve_bool` is True. preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype is `np.bool_` (some reductions such as np.sum convert bools to integers, while others such as np.max preserve bools. Returns: An ndarray. """ if dtype: dtype = utils.result_type(dtype) if keepdims is None: keepdims = False a = asarray(a, dtype=dtype) if ((dtype == np.bool_ or preserve_bool and a.dtype == np.bool_) and tf_bool_fn is not None): return utils.tensor_to_ndarray( tf_bool_fn(input_tensor=a.data, axis=axis, keepdims=keepdims)) if dtype is None: dtype = a.dtype if np.issubdtype(dtype, np.integer) or dtype == np.bool_: if promote_int == _TO_INT64: # If a is an integer/bool type and whose bit width is less than 64, # numpy up-casts it to 64-bit. if dtype == np.bool_: is_signed = True width = 8 # We can use any number here that is less than 64 else: is_signed = np.issubdtype(dtype, np.signedinteger) width = np.iinfo(dtype).bits if width < 64: if is_signed: dtype = np.int64 else: dtype = np.uint64 a = a.astype(dtype) elif promote_int == _TO_FLOAT: a = a.astype(dtypes.default_float_type()) return utils.tensor_to_ndarray( tf_fn(input_tensor=a.data, axis=axis, keepdims=keepdims))
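For context, a minimal NumPy-only sketch of the promotion behavior that `promote_int=_TO_INT64` mirrors; the exact result dtypes are an assumption that can vary by platform (64-bit Linux shown):

import numpy as np

# Sum-like reductions promote bool/small-int inputs to a 64-bit accumulator...
print(np.sum(np.array([True, True, False])).dtype)   # int64 on most 64-bit platforms
print(np.sum(np.arange(4, dtype=np.int32)).dtype)     # int64 on most 64-bit platforms
# ...while max-like reductions preserve bool, which is what `preserve_bool` covers.
print(np.max(np.array([True, False])).dtype)          # bool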
Returns real parts of all elements in `a`. Uses `tf.real`. Args: val: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. Returns: An ndarray with the same shape as `a`.
def real(val): """Returns real parts of all elements in `a`. Uses `tf.real`. Args: val: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. Returns: An ndarray with the same shape as `a`. """ val = asarray(val) # TODO(srbs): np.real returns a scalar if val is a scalar, whereas we always # return an ndarray. return utils.tensor_to_ndarray(tf.math.real(val.data))
The order argument can only be 'C' or 'F'.
def reshape(a, newshape, order='C'): """The order argument can only be 'C' or 'F'.""" if order not in {'C', 'F'}: raise ValueError('Unsupported order argument {}'.format(order)) a = asarray(a) if isinstance(newshape, arrays_lib.ndarray): newshape = newshape.data if isinstance(newshape, int): newshape = [newshape] if order == 'F': r = tf.transpose(tf.reshape(tf.transpose(a.data), newshape[::-1])) else: r = tf.reshape(a.data, newshape) return utils.tensor_to_ndarray(r)
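A quick NumPy-only check of the Fortran-order trick used above (transpose, reshape to the reversed shape in C order, transpose back); the array values are illustrative:

import numpy as np

a = np.arange(6).reshape(2, 3)
newshape = (3, 2)
expected = np.reshape(a, newshape, order='F')
# Same result via the transpose/reshape/transpose path used in the TF implementation.
emulated = np.transpose(np.reshape(np.transpose(a), newshape[::-1]))
assert np.array_equal(expected, emulated)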
Expand the shape of an array. Args: a: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. axis: int. axis on which to expand the shape. Returns: An ndarray with the contents and dtype of `a` and shape expanded on axis.
def expand_dims(a, axis): """Expand the shape of an array. Args: a: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. axis: int. axis on which to expand the shape. Returns: An ndarray with the contents and dtype of `a` and shape expanded on axis. """ a = asarray(a) return utils.tensor_to_ndarray(tf.expand_dims(a.data, axis=axis))
Removes single-element axes from the array. Args: a: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. axis: scalar or list/tuple of ints. TODO(srbs): tf.squeeze throws an error when axis is a Tensor and eager execution is enabled. So we cannot allow axis to be array_like here. Fix. Returns: An ndarray.
def squeeze(a, axis=None): """Removes single-element axes from the array. Args: a: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. axis: scalar or list/tuple of ints. TODO(srbs): tf.squeeze throws an error when axis is a Tensor and eager execution is enabled. So we cannot allow axis to be array_like here. Fix. Returns: An ndarray. """ a = asarray(a) return utils.tensor_to_ndarray(tf.squeeze(a.data, axis))
Permutes dimensions of the array. Args: a: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. axes: array_like. A list of ints with length rank(a) or None specifying the order of permutation. The i'th dimension of the output array corresponds to axes[i]'th dimension of the `a`. If None, the axes are reversed. Returns: An ndarray.
def transpose(a, axes=None): """Permutes dimensions of the array. Args: a: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. axes: array_like. A list of ints with length rank(a) or None specifying the order of permutation. The i'th dimension of the output array corresponds to axes[i]'th dimension of the `a`. If None, the axes are reversed. Returns: An ndarray. """ a = asarray(a) if axes is not None: axes = asarray(axes) return utils.tensor_to_ndarray(tf.transpose(a=a.data, perm=axes))
Raises ValueError if source, destination not in (-ndim(a), ndim(a)).
def moveaxis(a, source, destination): # pylint: disable=missing-docstring """Raises ValueError if source, destination not in (-ndim(a), ndim(a)).""" if not source and not destination: return a a = asarray(a).data if isinstance(source, int): source = (source,) if isinstance(destination, int): destination = (destination,) a_rank = utils._maybe_static(tf.rank(a)) # pylint: disable=protected-access def _correct_axis(axis, rank): if axis < 0: return axis + rank return axis source = tuple(_correct_axis(axis, a_rank) for axis in source) destination = tuple(_correct_axis(axis, a_rank) for axis in destination) if a.shape.rank is not None: perm = [i for i in range(a_rank) if i not in source] for dest, src in sorted(zip(destination, source)): assert dest <= len(perm) perm.insert(dest, src) else: r = tf.range(a_rank) def _remove_indices(a, b): """Remove indices (`b`) from `a`.""" items = tf.unstack(tf.sort(tf.stack(b)), num=len(b)) i = 0 result = [] for item in items: result.append(a[i:item]) i = item + 1 result.append(a[i:]) return tf.concat(result, 0) minus_sources = _remove_indices(r, source) minus_dest = _remove_indices(r, destination) perm = tf.scatter_nd(tf.expand_dims(minus_dest, 1), minus_sources, [a_rank]) perm = tf.tensor_scatter_nd_update(perm, tf.expand_dims(destination, 1), source) a = tf.transpose(a, perm) return utils.tensor_to_ndarray(a)
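A NumPy illustration of the permutation that `moveaxis` computes; the shapes are illustrative assumptions:

import numpy as np

x = np.zeros((3, 4, 5))
# Moving axis 0 to the end is the same as the explicit permutation (1, 2, 0).
print(np.moveaxis(x, 0, -1).shape)             # (4, 5, 3)
print(np.transpose(x, (1, 2, 0)).shape)        # (4, 5, 3)
# Multiple source/destination pairs are handled together.
print(np.moveaxis(x, [0, 1], [-1, -2]).shape)  # (5, 4, 3)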
Sets the `value` at `index` in the array `arr`. This works by replacing the slice at `index` in the tensor with `value`. Since tensors are immutable, this builds a new tensor using the `tf.concat` op. Currently, only 0-d and 1-d indices are supported. Note that this may break gradients e.g. a = tf_np.array([1, 2, 3]) old_a_t = a.data with tf.GradientTape(persistent=True) as g: g.watch(a.data) b = a * 2 a[0] = 5 g.gradient(b.data, [a.data]) # [None] g.gradient(b.data, [old_a_t]) # [[2., 2., 2.]] Here `d_b / d_a` is `[None]` since a.data no longer points to the same tensor. Args: arr: array_like. index: scalar or 1-d integer array. value: value to set at index. Returns: ndarray Raises: ValueError: if `index` is not a scalar or 1-d array.
def _setitem(arr, index, value): """Sets the `value` at `index` in the array `arr`. This works by replacing the slice at `index` in the tensor with `value`. Since tensors are immutable, this builds a new tensor using the `tf.concat` op. Currently, only 0-d and 1-d indices are supported. Note that this may break gradients e.g. a = tf_np.array([1, 2, 3]) old_a_t = a.data with tf.GradientTape(persistent=True) as g: g.watch(a.data) b = a * 2 a[0] = 5 g.gradient(b.data, [a.data]) # [None] g.gradient(b.data, [old_a_t]) # [[2., 2., 2.]] Here `d_b / d_a` is `[None]` since a.data no longer points to the same tensor. Args: arr: array_like. index: scalar or 1-d integer array. value: value to set at index. Returns: ndarray Raises: ValueError: if `index` is not a scalar or 1-d array. """ # TODO(srbs): Figure out a solution to the gradient problem. arr = asarray(arr) index = asarray(index) if index.ndim == 0: index = ravel(index) elif index.ndim > 1: raise ValueError('index must be a scalar or a 1-d array.') value = asarray(value, dtype=arr.dtype) if arr.shape[len(index):] != value.shape: value = full(arr.shape[len(index):], value) prefix_t = arr.data[:index.data[0]] postfix_t = arr.data[index.data[0] + 1:] if len(index) == 1: arr._data = tf.concat( # pylint: disable=protected-access [prefix_t, tf.expand_dims(value.data, 0), postfix_t], 0) else: subarray = arr[index.data[0]] _setitem(subarray, index[1:], value) arr._data = tf.concat( # pylint: disable=protected-access [prefix_t, tf.expand_dims(subarray.data, 0), postfix_t], 0)
Pads an array. Args: ary: array_like of rank N. Input array. pad_width: {sequence, array_like, int}. Number of values padded to the edges of each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis. ((before, after),) yields same before and after pad for each axis. (pad,) or int is a shortcut for before = after = pad width for all axes. mode: string. One of the following string values: 'constant' Pads with a constant value. 'reflect' Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. 'symmetric' Pads with the reflection of the vector mirrored along the edge of the array. **NOTE**: The supported list of `mode` does not match that of numpy's. constant_values: scalar with same dtype as `array`. Used in 'constant' mode as the pad value. Default is 0. Returns: An ndarray padded array of rank equal to `array` with shape increased according to `pad_width`. Raises: ValueError if `mode` is not supported.
def pad(ary, pad_width, mode, constant_values=0): """Pads an array. Args: ary: array_like of rank N. Input array. pad_width: {sequence, array_like, int}. Number of values padded to the edges of each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis. ((before, after),) yields same before and after pad for each axis. (pad,) or int is a shortcut for before = after = pad width for all axes. mode: string. One of the following string values: 'constant' Pads with a constant value. 'reflect' Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. 'symmetric' Pads with the reflection of the vector mirrored along the edge of the array. **NOTE**: The supported list of `mode` does not match that of numpy's. constant_values: scalar with same dtype as `array`. Used in 'constant' mode as the pad value. Default is 0. Returns: An ndarray padded array of rank equal to `array` with shape increased according to `pad_width`. Raises: ValueError if `mode` is not supported. """ if not (mode == 'constant' or mode == 'reflect' or mode == 'symmetric'): raise ValueError('Unsupported padding mode: ' + mode) mode = mode.upper() ary = asarray(ary) pad_width = asarray(pad_width, dtype=tf.int32) return utils.tensor_to_ndarray(tf.pad( tensor=ary.data, paddings=pad_width.data, mode=mode, constant_values=constant_values))
The out argument is not supported, and the default mode is 'clip' (NumPy's default is 'raise').
def take(a, indices, axis=None, out=None, mode='clip'): """out argument is not supported, and default mode is clip.""" if out is not None: raise ValueError('out argument is not supported in take.') if mode not in {'raise', 'clip', 'wrap'}: raise ValueError("Invalid mode '{}' for take".format(mode)) a = asarray(a).data indices = asarray(indices).data if axis is None: a = tf.reshape(a, [-1]) axis = 0 axis_size = tf.shape(a, indices.dtype)[axis] if mode == 'clip': indices = tf.clip_by_value(indices, 0, axis_size-1) elif mode == 'wrap': indices = tf.math.floormod(indices, axis_size) else: raise ValueError("The 'raise' mode to take is not supported.") return utils.tensor_to_ndarray(tf.gather(a, indices, axis=axis))
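For intuition, the NumPy equivalents of the two supported modes, with illustrative values:

import numpy as np

a = np.array([10, 20, 30])
idx = np.array([-1, 0, 5])
# 'clip' maps out-of-range indices to the nearest valid index,
# 'wrap' takes them modulo the axis size -- matching the tf implementation above.
print(np.take(a, idx, mode='clip'))  # [10 10 30]
print(np.take(a, idx, mode='wrap'))  # [30 10 30]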
Raises ValueError if exactly one of x or y is not None.
def where(condition, x=None, y=None): """Raises ValueError if exactly one of x or y is not None.""" condition = asarray(condition, dtype=np.bool_) if x is None and y is None: return nonzero(condition) elif x is not None and y is not None: x, y = _promote_dtype(x, y) return utils.tensor_to_ndarray(tf.where(condition.data, x.data, y.data)) raise ValueError('Both x and y must be ndarrays, or both must be None.')
Return the shape of an array. Args: a: array_like. Input array. Returns: Tuple of ints.
def shape(a): """Return the shape of an array. Args: a: array_like. Input array. Returns: Tuple of ints. """ a = asarray(a) return a.shape
Converting boundaries of splits to sizes of splits. Args: a: the array to be split. boundaries: the boundaries, as in np.split. axis: the axis along which to split. Returns: A list of sizes of the splits, as in tf.split.
def _boundaries_to_sizes(a, boundaries, axis): """Converting boundaries of splits to sizes of splits. Args: a: the array to be split. boundaries: the boundaries, as in np.split. axis: the axis along which to split. Returns: A list of sizes of the splits, as in tf.split. """ if axis >= len(a.shape): raise ValueError('axis %s is out of bound for shape %s' % (axis, a.shape)) total_size = a.shape[axis] sizes = [] sizes_sum = 0 prev = 0 for i, b in enumerate(boundaries): size = b - prev if size < 0: raise ValueError('The %s-th boundary %s is smaller than the previous ' 'boundary %s' % (i, b, prev)) size = min(size, max(0, total_size - sizes_sum)) sizes.append(size) sizes_sum += size prev = b sizes.append(max(0, total_size - sizes_sum)) return sizes
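A worked example of the boundary-to-size conversion, checked against NumPy's split semantics that `tf.split` sizes must reproduce:

import numpy as np

# Boundaries [2, 5] on an axis of length 8 become split sizes [2, 3, 3].
a = np.arange(8)
print([len(s) for s in np.split(a, [2, 5])])  # [2, 3, 3]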
Reshape arrays to be at least `n`-dimensional. Args: n: The minimal rank. new_shape: a function that takes `n` and the old shape and returns the desired new shape. *arys: ndarray(s) to be reshaped. Returns: The reshaped array(s).
def _atleast_nd(n, new_shape, *arys): """Reshape arrays to be at least `n`-dimensional. Args: n: The minimal rank. new_shape: a function that takes `n` and the old shape and returns the desired new shape. *arys: ndarray(s) to be reshaped. Returns: The reshaped array(s). """ def f(x): # pylint: disable=g-long-lambda x = asarray(x) return asarray( utils.cond( utils.greater(n, tf.rank(x)), lambda: reshape(x, new_shape(n, tf.shape(x.data))).data, lambda: x.data)) arys = list(map(f, arys)) if len(arys) == 1: return arys[0] else: return arys
Gets the default float type. Returns: If `is_allow_float64()` is true, returns float64; otherwise returns float32.
def default_float_type(): """Gets the default float type. Returns: If `is_allow_float64()` is true, returns float64; otherwise returns float32. """ if is_allow_float64(): return float64 else: return float32
Computes the tf_fn(x) for each element in `x`. Args: tf_fn: function that takes a single Tensor argument. x: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. promote_to_float: whether to cast the argument to a float dtype (`dtypes.default_float_type`) if it is not already. Returns: An ndarray with the same shape as `x`. The default output dtype is determined by `dtypes.default_float_type`, unless x is an ndarray with a floating point type, in which case the output type is same as x.dtype.
def _scalar(tf_fn, x, promote_to_float=False): """Computes the tf_fn(x) for each element in `x`. Args: tf_fn: function that takes a single Tensor argument. x: array_like. Could be an ndarray, a Tensor or any object that can be converted to a Tensor using `tf.convert_to_tensor`. promote_to_float: whether to cast the argument to a float dtype (`dtypes.default_float_type`) if it is not already. Returns: An ndarray with the same shape as `x`. The default output dtype is determined by `dtypes.default_float_type`, unless x is an ndarray with a floating point type, in which case the output type is same as x.dtype. """ x = array_ops.asarray(x) if promote_to_float and not np.issubdtype(x.dtype, np.inexact): x = x.astype(dtypes.default_float_type()) return utils.tensor_to_ndarray(tf_fn(x.data))
Helper to generate nan* functions.
def _make_nan_reduction(onp_reduction, reduction, init_val): """Helper to generate nan* functions.""" @utils.np_doc(onp_reduction) def nan_reduction(a, axis=None, dtype=None, keepdims=False): a = array_ops.array(a) v = array_ops.array(init_val, dtype=a.dtype) return reduction( array_ops.where(isnan(a), v, a), axis=axis, dtype=dtype, keepdims=keepdims) return nan_reduction
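For intuition, a NumPy-only sketch of the substitution the generated `nan_reduction` performs, shown here for sum, whose init value is 0:

import numpy as np

a = np.array([1.0, np.nan, 2.0])
# nansum behaves like replacing NaNs with the reduction's init value before reducing.
print(np.nansum(a))                           # 3.0
print(np.sum(np.where(np.isnan(a), 0.0, a)))  # 3.0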
This currently requires copy=True and sparse=False.
def meshgrid(*xi, **kwargs): """This currently requires copy=True and sparse=False.""" sparse = kwargs.get('sparse', False) if sparse: raise ValueError('tf.numpy doesnt support returning sparse arrays yet') copy = kwargs.get('copy', True) if not copy: raise ValueError('tf.numpy only supports copy=True') indexing = kwargs.get('indexing', 'xy') xi = [array_ops.asarray(arg).data for arg in xi] kwargs = {'indexing': indexing} outputs = tf.meshgrid(*xi, **kwargs) outputs = [utils.tensor_to_ndarray(output) for output in outputs] return outputs
Returns samples from a normal distribution. Uses `tf.random_normal`. Args: *args: The shape of the output array. Returns: An ndarray with shape `args` and dtype `float64`.
def randn(*args): """Returns samples from a normal distribution. Uses `tf.random_normal`. Args: *args: The shape of the output array. Returns: An ndarray with shape `args` and dtype `float64`. """ # TODO(wangpeng): Use new stateful RNG if utils.isscalar(args): args = (args,) return utils.tensor_to_ndarray( tf.random.normal(args, dtype=DEFAULT_RANDN_DTYPE))
Sets the seed for the random number generator. Uses `tf.set_random_seed`. Args: s: an integer.
def seed(s): """Sets the seed for the random number generator. Uses `tf.set_random_seed`. Args: s: an integer. """ # TODO(wangpeng): make the signature the same as numpy tf.random.set_seed(s)
Converts a native python or numpy type to TF DType. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A tensorflow `DType`.
def _to_tf_type(dtype): """Converts a native python or numpy type to TF DType. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A tensorflow `DType`. """ return tf.as_dtype(dtype)
Converts a native python or TF DType to numpy type. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A NumPy `dtype`.
def _to_numpy_type(dtype): """Converts a native python or TF DType to numpy type. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A NumPy `dtype`. """ if isinstance(dtype, tf.DType): return dtype.as_numpy_dtype return np.dtype(dtype)
Returns properties of floating point types. Note that currently it just forwards to the numpy namesake, while tensorflow and numpy dtypes may have different properties. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A class describing properties of `dtype`, as described by https://docs.scipy.org/doc/numpy/reference/generated/numpy.finfo.html
def finfo(dtype): """Returns properties of floating point types. Note that currently it just forwards to the numpy namesake, while tensorflow and numpy dtypes may have different properties. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A class describing properties of `dtype`, as described by https://docs.scipy.org/doc/numpy/reference/generated/numpy.finfo.html """ return np.finfo(_to_numpy_type(dtype))
Returns whether `val` is a scalar value or scalar Tensor.
def isscalar(val): """Returns whether `val` is a scalar value or scalar Tensor.""" if isinstance(val, (np.ndarray, arrays.ndarray, tf.Tensor)): return len(val.shape) == 0 # pylint: disable=g-explicit-length-test return np.isscalar(val)
Returns the type resulting from applying NumPy type promotion to arguments. Args: *arrays_and_dtypes: A list of array_like objects or dtypes. Returns: A numpy dtype.
def result_type(*arrays_and_dtypes): """Returns the type resulting from applying NumPy type promotion to arguments. Args: *arrays_and_dtypes: A list of array_like objects or dtypes. Returns: A numpy dtype. """ def maybe_get_dtype(x): # Don't put np.ndarray in this list, because np.result_type looks at the # value (not just dtype) of np.ndarray to decide the result type. if isinstance(x, (arrays.ndarray, arrays.ShardedNdArray, tf.Tensor, tf.IndexedSlices)): return _to_numpy_type(x.dtype) elif isinstance(x, tf.DType): return _to_numpy_type(x) return x arrays_and_dtypes = [maybe_get_dtype(x) for x in tf.nest.flatten(arrays_and_dtypes)] if not arrays_and_dtypes: # If arrays_and_dtypes is an empty list, let numpy decide what the dtype is. arrays_and_dtypes = [np.asarray([])] return dtypes._result_type(*arrays_and_dtypes)
Returns the type resulting from applying NumPy type promotion. Args: type1: A numpy type. type2: A numpy type. Returns: A numpy type.
def promote_types(type1, type2): """Returns the type resulting from applying NumPy type promotion. Args: type1: A numpy type. type2: A numpy type. Returns: A numpy type. """ type1 = _to_numpy_type(type1) type2 = _to_numpy_type(type2) return dtypes.canonicalize_dtype(np.promote_types(type1, type2))
An enhanced funcsigs.signature that can handle numpy.ufunc.
def _np_signature(f): """An enhanced funcsigs.signature that can handle numpy.ufunc.""" if not isinstance(f, np.ufunc): try: return funcsigs.signature(f) except ValueError: return None def names_from_num(prefix, n): if n <= 0: return [] elif n == 1: return [prefix] else: return [prefix + str(i + 1) for i in range(n)] input_names = names_from_num('x', f.nin) output_names = names_from_num('out', f.nout) keyword_only_params = [ ('where', True), ('casting', 'same_kind'), ('order', 'K'), ('dtype', None), ('subok', True), ('signature', None), ('extobj', None)] params = [] params += [funcsigs.Parameter(name, funcsigs.Parameter.POSITIONAL_ONLY) for name in input_names] if f.nout > 1: params += [funcsigs.Parameter(name, funcsigs.Parameter.POSITIONAL_ONLY, default=None) for name in output_names] params += [funcsigs.Parameter( 'out', funcsigs.Parameter.POSITIONAL_OR_KEYWORD, default=None if f.nout == 1 else (None,) * f.nout)] params += [funcsigs.Parameter(name, funcsigs.Parameter.KEYWORD_ONLY, default=default) for name, default in keyword_only_params] return funcsigs.Signature(params)
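A small illustration of why the manual signature is needed; whether standard introspection fails on ufuncs depends on the NumPy version, so the failure branch is hedged behind a try/except:

import inspect
import numpy as np

try:
    inspect.signature(np.add)
except ValueError:
    print("np.add has no introspectable signature")  # typical for ufuncs
# nin/nout expose the input and output counts used to build the parameter list above.
print(np.add.nin, np.add.nout)  # 2 1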
Attaches numpy docstring to a function. Args: np_fun: the numpy function whose docstring will be used. Returns: A function decorator that attaches the docstring from `np_fun` to the decorated function.
def np_doc(np_fun): """Attaches numpy docstring to a function. Args: np_fun: the numpy function whose docstring will be used. Returns: A function decorator that attaches the docstring from `np_fun` to the decorated function. """ np_sig = _np_signature(np_fun) def decorator(f): """The decorator.""" unsupported_params = [] if np_sig is not None: sig = funcsigs.signature(f) for name in np_sig.parameters: if name not in sig.parameters: unsupported_params.append(name) f.__doc__ = _np_doc_helper(f, np_fun, unsupported_params) return f return decorator
Helper to get docs.
def _np_doc_helper(f, np_f, unsupported_params=None): """Helper to get docs.""" if not unsupported_params and not _has_docstring(f) and _has_docstring(np_f): return np_f.__doc__ doc = 'TensorFlow variant of `numpy.%s`.\n\n' % np_f.__name__ if unsupported_params: doc += 'Unsupported arguments: ' + ', '.join( '`' + name + '`' for name in unsupported_params) + '.\n\n' if _has_docstring(f): doc += f.__doc__ doc = _add_blank_line(doc) if _has_docstring(np_f): doc += 'Documentation for `numpy.%s`:\n\n' % np_f.__name__ doc += np_f.__doc__ return doc
Attaches numpy docstring to a function. This differs from np_doc in that it doesn't check for a match in signature. Args: np_f: the numpy function whose docstring will be used. Returns: A function decorator that attaches the docstring from `np_f` to the decorated function.
def np_doc_only(np_f): """Attaches numpy docstring to a function. This differs from np_doc in that it doesn't check for a match in signature. Args: np_f: the numpy function whose docstring will be used. Returns: A function decorator that attaches the docstring from `np_f` to the decorated function. """ def decorator(f): f.__doc__ = _np_doc_helper(f, np_f) return f return decorator
Broadcast tensors. Args: *args: a list of tensors whose shapes are broadcastable against each other. Returns: Tensors broadcasted to the common shape.
def tf_broadcast(*args): """Broadcast tensors. Args: *args: a list of tensors whose shapes are broadcastable against each other. Returns: Tensors broadcasted to the common shape. """ if len(args) <= 1: return args sh = tf.shape(args[0]) for arg in args[1:]: sh = tf.broadcast_dynamic_shape(sh, tf.shape(arg)) return [tf.broadcast_to(arg, sh) for arg in args]
A version of tf.get_static_value that returns None on float dtypes. It returns None on float dtypes in order to avoid breaking gradients. Args: x: a tensor. Returns: Same as `tf.get_static_value`, except that it returns None when `x` has a float dtype.
def get_static_value(x): """A version of tf.get_static_value that returns None on float dtypes. It returns None on float dtypes in order to avoid breaking gradients. Args: x: a tensor. Returns: Same as `tf.get_static_value`, except that it returns None when `x` has a float dtype. """ if isinstance(x, tf.Tensor) and (x.dtype.is_floating or x.dtype.is_complex): return None return tf.get_static_value(x)
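A short usage sketch, assuming the helper above is in scope and TensorFlow is installed:

import tensorflow as tf

# Integer constants fold to a static NumPy value; float tensors are deliberately
# left dynamic so gradients are not broken by constant folding.
print(get_static_value(tf.constant([1, 2, 3])))   # [1 2 3]
print(get_static_value(tf.constant([1.0, 2.0])))  # None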
A version of tf.cond that tries to evaluate the condition.
def cond(pred, true_fn, false_fn): """A version of tf.cond that tries to evaluate the condition.""" v = get_static_value(pred) if v is None: return tf.cond(pred, true_fn, false_fn) if v: return true_fn() else: return false_fn()
A version of tf.add that eagerly evaluates if possible.
def add(a, b): """A version of tf.add that eagerly evaluates if possible.""" return _maybe_static(a) + _maybe_static(b)
A version of tf.subtract that eagerly evaluates if possible.
def subtract(a, b): """A version of tf.subtract that eagerly evaluates if possible.""" return _maybe_static(a) - _maybe_static(b)
A version of tf.greater that eagerly evaluates if possible.
def greater(a, b): """A version of tf.greater that eagerly evaluates if possible.""" return _maybe_static(a) > _maybe_static(b)
A version of tf.greater_equal that eagerly evaluates if possible.
def greater_equal(a, b): """A version of tf.greater_equal that eagerly evaluates if possible.""" return _maybe_static(a) >= _maybe_static(b)
A version of tf.less_equal that eagerly evaluates if possible.
def less_equal(a, b): """A version of tf.less_equal that eagerly evaluates if possible.""" return _maybe_static(a) <= _maybe_static(b)
A version of tf.logical_and that eagerly evaluates if possible.
def logical_and(a, b): """A version of tf.logical_and that eagerly evaluates if possible.""" a_value = get_static_value(a) if a_value is not None: if np.isscalar(a_value): if a_value: return _maybe_static(b) else: return a_value else: return a_value & _maybe_static(b) else: return a & _maybe_static(b)
A version of tf.logical_or that eagerly evaluates if possible.
def logical_or(a, b): """A version of tf.logical_or that eagerly evaluates if possible.""" a_value = get_static_value(a) if a_value is not None: if np.isscalar(a_value): if a_value: return a_value else: return _maybe_static(b) else: return a_value | _maybe_static(b) else: return a | _maybe_static(b)
A version of __getitem__ that eagerly evaluates if possible.
def getitem(a, slice_spec): """A version of __getitem__ that eagerly evaluates if possible.""" return _maybe_static(a)[slice_spec]
A version of tf.reduce_all that eagerly evaluates if possible.
def reduce_all(input_tensor, axis=None, keepdims=False): """A version of tf.reduce_all that eagerly evaluates if possible.""" v = get_static_value(input_tensor) if v is None: return tf.reduce_all(input_tensor, axis=axis, keepdims=keepdims) else: return v.all(axis=axis, keepdims=keepdims)
A version of tf.reduce_any that eagerly evaluates if possible.
def reduce_any(input_tensor, axis=None, keepdims=False): """A version of tf.reduce_any that eagerly evaluates if possible.""" v = get_static_value(input_tensor) if v is None: return tf.reduce_any(input_tensor, axis=axis, keepdims=keepdims) else: return v.any(axis=axis, keepdims=keepdims)
Build dataset for training. This builds the dataset from `load_dataset`; customize this function to train the model on your own dataset. Args: dataset_name (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset.
def build_dataset( tokenizer, dataset_name="lvwerra/stack-exchange-paired", ): """ Build dataset for training. This builds the dataset from `load_dataset`, one should customize this function to train the model on its own dataset. Args: dataset_name (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset. """ num_proc = 24 def preprocess_function(examples): new_examples = { "query": [], "input_ids": [], } for question in examples["question"]: query = "Question: " + question + "\n\nAnswer: " tokenized_question = tokenizer(query, truncation=True) new_examples["query"].append(query) new_examples["input_ids"].append(tokenized_question["input_ids"]) return new_examples ds = train_dataset.map( preprocess_function, batched=True, num_proc=num_proc, remove_columns=original_columns, ) ds = ds.filter(lambda x: len(x["input_ids"]) < 512, batched=False) ds.set_format(type="torch") return ds
Estimate the average number of characters per token in the dataset.
def chars_token_ratio(dataset, tokenizer, nb_examples=400): """ Estimate the average number of characters per token in the dataset. """ total_characters, total_tokens = 0, 0 for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples): text = prepare_sample_text(example) total_characters += len(text) if tokenizer.is_fast: total_tokens += len(tokenizer(text).tokens()) else: total_tokens += len(tokenizer.tokenize(text)) return total_characters / total_tokens
Prints the number of trainable parameters in the model.
def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" )
Prepare the text from a sample of the dataset.
def prepare_sample_text(example): """Prepare the text from a sample of the dataset.""" text = f"Question: {example['question']}\n\nAnswer: {example['response_j']}" return text
Load the stack-exchange-paired dataset from Hugging Face and convert it to the necessary format. The dataset is converted to a dictionary with the following structure: { 'prompt': List[str], 'chosen': List[str], 'rejected': List[str], } Prompts are structured as follows: "Question: " + <prompt> + "\n\nAnswer: "
def get_stack_exchange_paired( data_dir: str = "data/rl", sanity_check: bool = False, cache_dir: Optional[str] = None, num_proc=24, ) -> Dataset: """Load the stack-exchange-paired dataset from Hugging Face and convert it to the necessary format. The dataset is converted to a dictionary with the following structure: { 'prompt': List[str], 'chosen': List[str], 'rejected': List[str], } Prompts are structured as follows: "Question: " + <prompt> + "\n\nAnswer: " """ dataset = load_dataset( "lvwerra/stack-exchange-paired", split="train", cache_dir=cache_dir, data_dir=data_dir, ) original_columns = dataset.column_names if sanity_check: dataset = dataset.select(range(min(len(dataset), 1000))) def return_prompt_and_responses(samples) -> Dict[str, str]: return { "prompt": ["Question: " + question + "\n\nAnswer: " for question in samples["question"]], "chosen": samples["response_j"], "rejected": samples["response_k"], } return dataset.map( return_prompt_and_responses, batched=True, num_proc=num_proc, remove_columns=original_columns, )
Generate random arithmetic tasks and answers.
def generate_data(n): """Generate random arithmetic tasks and answers.""" tasks, answers = [], [] for _ in range(n): a = np.random.randint(0, 50) b = np.random.randint(0, 50) op = np.random.choice(["-", "+", "*"]) tasks.append(f"\n\nWhat is {a} {op} {b}?") if op == "-": answers.append(a - b) elif op == "+": answers.append(a + b) else: answers.append(a * b) return tasks, answers
Reward if generated response contains correct answer.
def exact_match_reward(responses, answers=None): """Reward if generated response contains correct answer.""" rewards = [] pattern = r"Result\s*=\s*(-?\d+(?:\.\d+)?)\s*<submit>" # generated by chatGPT for response, answer in zip(responses, answers): reward = 0.0 predicted_number = None match_pattern = re.findall(pattern, response) if match_pattern: predicted_number = float(match_pattern[0]) if predicted_number is not None: if np.abs(predicted_number - answer) < 0.01: reward += 1.0 rewards.append(torch.tensor(reward)) return rewards
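A small check of the reward pattern on a made-up response string:

import re

pattern = r"Result\s*=\s*(-?\d+(?:\.\d+)?)\s*<submit>"
response = "The calculator returned 12. Result = 12 <submit>"
# findall returns the captured number, which is then compared against the answer.
print(re.findall(pattern, response))  # ['12']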
Reward if generated response contains correct answer.
def exact_match_reward(responses, answers=None): """Reward if generated response contains correct answer.""" rewards = [] pattern = r"Result\s*=\s*(-?\d+(?:\.\d+)?)\s*<submit>" # generated by chatGPT for response, answer in zip(responses, answers): reward = 0.0 try: predicted_number = None match_pattern = re.findall(pattern, response) if match_pattern: predicted_number = float(match_pattern[0]) if predicted_number is not None: if np.abs(predicted_number - float(answer)) < 0.1: reward += 1.0 except Exception: pass rewards.append(torch.tensor(reward)) return rewards
Reward if generated response contains correct answer.
def exact_match_reward(responses, answers=None): """Reward if generated response contains correct answer.""" rewards = [] for response, answer in zip(responses, answers): reward = 0.0 for a in answer: if a.lower() in response.lower(): reward += 1.0 break rewards.append(torch.tensor(reward)) return rewards
Build dataset for training. This builds the dataset from `load_dataset`; customize this function to train the model on your own dataset. Args: dataset_name (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset.
def build_dataset( config, dataset_name="allenai/real-toxicity-prompts", input_min_text_length=5, input_max_text_length=10 ): """ Build dataset for training. This builds the dataset from `load_dataset`, one should customize this function to train the model on its own dataset. Args: dataset_name (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset. """ tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token ds = load_dataset(dataset_name, split="train") def filter_fn(sample): toxicity = sample["prompt"]["toxicity"] return toxicity is not None and toxicity > 0.3 ds = ds.filter(filter_fn, batched=False) input_size = LengthSampler(input_min_text_length, input_max_text_length) def tokenize(sample): prompt = sample["prompt"]["text"] continuation = sample["continuation"]["text"] sample["input_ids"] = tokenizer.encode(prompt + continuation)[: input_size()] sample["query"] = tokenizer.decode(sample["input_ids"]) return sample ds = ds.map(tokenize, batched=False) ds.set_format(type="torch") ds = ds.train_test_split(test_size=0.2, shuffle=False)["train"] return ds
Filter `llm_name` completions and binarize given their helpfulness score. If helpfulness score is 5, it is desirable. Otherwise, it is undesirable.
def build_helpfulness_dataset(llm_name: str) -> Dataset: """ Filter `llm_name` completions and binarize given their helpfulness score. If helpfulness score is 5, it is desirable. Otherwise, it is undesirable. """ def get_model_rating(example, metric: str, llm_name: str): try: model_index = example["models"].index(llm_name) return {metric: int(example["completions"][model_index]["annotations"][metric]["Rating"])} except ValueError as e: logging.warning(e) return -1 def get_model_response(example, llm_name: str): try: model_index = example["models"].index(llm_name) return {"response": example["completions"][model_index]["response"]} except ValueError as e: logging.warning(e) return -1 dataset = load_dataset("openbmb/UltraFeedback")["train"] ds = dataset.filter(lambda example: llm_name in example["models"], batched=False, num_proc=8) ds = ds.filter(lambda example: len(example["models"]) == len(example["completions"]), batched=False, num_proc=8) METRIC = "helpfulness" ds = ds.map( get_model_rating, batched=False, num_proc=8, fn_kwargs={"metric": METRIC, "llm_name": llm_name}, ) ds = ds.map( get_model_response, batched=False, num_proc=8, fn_kwargs={"llm_name": llm_name}, ) ds = ds.select_columns(["source", "instruction", "response", "helpfulness"]) ds = ds.rename_columns({"instruction": "prompt", "response": "completion"}) ds = ds.map(lambda example: {"label": example["helpfulness"] >= 5}, batched=False, num_proc=8) ds = ds.map( lambda example: {"prompt": [{"role": "user", "content": example["prompt"]}]}, batched=False, num_proc=8, ) dataset = ds.train_test_split(test_size=0.05, seed=42) return dataset
Borrowed from https://huggingface.co/nomic-ai/nomic-embed-text-v1.5#transformers
def embed_prompt(input_ids: torch.LongTensor, attention_mask: torch.LongTensor, model: PreTrainedModel): """ Borrowed from https://huggingface.co/nomic-ai/nomic-embed-text-v1.5#transformers """ def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) with torch.no_grad(): model_output = model(input_ids=input_ids, attention_mask=attention_mask) embeddings = mean_pooling(model_output, attention_mask) matryoshka_dim = 512 # normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) embeddings = F.layer_norm(embeddings, normalized_shape=(embeddings.shape[1],)) embeddings = embeddings[:, :matryoshka_dim] return embeddings
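A minimal sketch of the mean-pooling step with dummy tensors; the shapes are illustrative assumptions, not tied to any particular model:

import torch

token_embeddings = torch.randn(2, 4, 8)                       # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
# Average only over non-padding positions, guarding against empty masks.
pooled = torch.sum(token_embeddings * mask, 1) / torch.clamp(mask.sum(1), min=1e-9)
print(pooled.shape)  # torch.Size([2, 8])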
Build dataset for training. This builds the dataset from `load_dataset`; customize this function to train the model on your own dataset. Args: query_dataset (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset.
def build_dataset(config, query_dataset, input_min_text_length=2, input_max_text_length=8): """ Build dataset for training. This builds the dataset from `load_dataset`, one should customize this function to train the model on its own dataset. Args: query_dataset (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset. """ tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token # load imdb with datasets ds = load_dataset(query_dataset, split="train") ds = ds.rename_columns({"text": "review"}) ds = ds.filter(lambda x: len(x["review"]) > 200, batched=False) input_size = LengthSampler(input_min_text_length, input_max_text_length) def tokenize(sample): sample["input_ids"] = tokenizer.encode(sample["review"])[: input_size()] sample["query"] = tokenizer.decode(sample["input_ids"]) return sample ds = ds.map(tokenize, batched=False) ds.set_format(type="torch") return ds
Decorator marking a test that requires peft. Skips the test if peft is not available.
def require_peft(test_case): """ Decorator marking a test that requires peft. Skips the test if peft is not available. """ if not is_peft_available(): test_case = unittest.skip("test requires peft")(test_case) return test_case
Decorator marking a test that requires bnb. Skips the test if bnb is not available.
def require_bitsandbytes(test_case): """ Decorator marking a test that requires bnb. Skips the test if bnb is not available. """ if not is_bitsandbytes_available(): test_case = unittest.skip("test requires bnb")(test_case) return test_case
Decorator marking a test that requires diffusers. Skips the test if diffusers is not available.
def require_diffusers(test_case): """ Decorator marking a test that requires diffusers. Skips the test if diffusers is not available. """ if not is_diffusers_available(): test_case = unittest.skip("test requires diffusers")(test_case) return test_case
Decorator marking a test that requires PIL. Skips the test if pil is not available.
def requires_pil(test_case): """ Decorator marking a test that requires PIL. Skips the test if pil is not available. """ if not is_pil_available(): test_case = unittest.skip("test requires PIL")(test_case) return test_case
Decorator marking a test that requires wandb. Skips the test if wandb is not available.
def require_wandb(test_case, required: bool = True): """ Decorator marking a test that requires wandb. Skips the test if wandb is not available. """ # XOR, i.e.: # skip if available and required = False and # skip if not available and required = True if is_wandb_available() ^ required: test_case = unittest.skip("test requires wandb")(test_case) return test_case
Decorator marking a test that requires no wandb. Skips the test if wandb is available.
def require_no_wandb(test_case): """ Decorator marking a test that requires no wandb. Skips the test if wandb is available. """ return require_wandb(test_case, required=False)
Decorator marking a test that requires multiple GPUs. Skips the test if there aren't enough GPUs.
def require_torch_multi_gpu(test_case): """ Decorator marking a test that requires multiple GPUs. Skips the test if there aren't enough GPUs. """ if torch.cuda.device_count() < 2: test_case = unittest.skip("test requires multiple GPUs")(test_case) return test_case
Decorator marking a test that requires GPUs. Skips the test if there is no GPU.
def require_torch_gpu(test_case): """ Decorator marking a test that requires GPUs. Skips the test if there is no GPU. """ if not torch.cuda.is_available(): test_case = unittest.skip("test requires GPU")(test_case) return test_case
Decorator marking a test that requires multiple XPUs. Skips the test if there aren't enough XPUs.
def require_torch_multi_xpu(test_case): """ Decorator marking a test that requires multiple XPUs. Skips the test if there aren't enough XPUs. """ if torch.xpu.device_count() < 2 and is_xpu_available(): test_case = unittest.skip("test requires multiple XPUs")(test_case) return test_case
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering. Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens we keep per batch example in the output. From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
def top_k_top_p_filtering( logits: torch.FloatTensor, top_k: int = 0, top_p: float = 1.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1, ) -> torch.FloatTensor: """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering. Args: logits: logits distribution shape (batch size, vocabulary size) top_k (`int`, *optional*, defaults to 0): If > 0, only keep the top k tokens with highest probability (top-k filtering) top_p (`float`, *optional*, defaults to 1.0): If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimumber of tokens we keep per batch example in the output. From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ if top_k > 0: logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( None, logits ) if 0 <= top_p <= 1.0: logits = TopPLogitsWarper(top_p=top_p, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( None, logits ) return logits
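A hypothetical usage sketch of the function above; it assumes `transformers` is installed so that `TopKLogitsWarper`/`TopPLogitsWarper` are importable, and the logits values are random:

import torch

logits = torch.randn(1, 50)                       # (batch, vocab)
filtered = top_k_top_p_filtering(logits, top_k=10, top_p=0.9)
probs = torch.softmax(filtered, dim=-1)           # filtered tokens get probability 0
next_token = torch.multinomial(probs, num_samples=1)
print(next_token.shape)  # torch.Size([1, 1])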
Flatten dictionary and concatenate nested keys with separator.
def flatten_dict(nested: Dict, sep: str = "/") -> Dict: """Flatten dictionary and concatenate nested keys with separator.""" def recurse(nest: Dict, prefix: str, into: Dict) -> None: for k, v in nest.items(): if sep in k: raise ValueError(f"separator '{sep}' not allowed to be in key '{k}'") if isinstance(v, Mapping): recurse(v, prefix + k + sep, into) else: into[prefix + k] = v flat = {} recurse(nested, "", flat) return flat
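Example output for a small nested dict, assuming `flatten_dict` as defined above; the keys are illustrative:

nested = {"env": {"name": "ppo", "reward": {"mean": 0.5}}, "lr": 1e-5}
print(flatten_dict(nested))
# {'env/name': 'ppo', 'env/reward/mean': 0.5, 'lr': 1e-05}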
Converts the stats from a flattened dict to single scalar dicts
def convert_to_scalar(stats: Dict) -> Dict: """ Converts the stats from a flattened dict to single scalar dicts """ tensorboard_stats = {} for k, v in stats.items(): # for tensorboard compatibility - arrays and tensors are ignored with tensorboard # therefore we convert single element tensors to scalars if (isinstance(v, torch.Tensor) or isinstance(v, np.ndarray)) and ( len(v.shape) == 0 or (len(v.shape) == 1 and v.shape[0] == 1) ): v = v.item() tensorboard_stats[k] = v return tensorboard_stats
Stack the values of a dict.
def stack_dicts(stats_dicts: List[Dict]) -> Dict: """Stack the values of a dict.""" results = dict() for k in stats_dicts[0]: stats_list = [torch.flatten(d[k]) for d in stats_dicts] results[k] = pad_sequence(stats_list, batch_first=True, padding_value=WANDB_PADDING) return results
Add suffix to dict keys.
def add_suffix(input_dict: Dict, suffix: str) -> Dict: """Add suffix to dict keys.""" return {k + suffix: v for k, v in input_dict.items()}
Pad tensor to size.
def pad_to_size(tensor: torch.Tensor, size: int, dim: int = 1, padding: int = 50256) -> torch.Tensor: """Pad tensor to size.""" t_size = tensor.size()[dim] if t_size == size: return tensor else: return torch.nn.functional.pad(tensor, (0, size - t_size), "constant", padding)
See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591
def logprobs_from_logits(logits: torch.Tensor, labels: torch.Tensor, gather: bool = True) -> torch.Tensor: """ See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591 """ logp = F.log_softmax(logits, dim=2) if not gather: return logp logpy = torch.gather(logp, 2, labels.unsqueeze(2)).squeeze(-1) return logpy
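A shape-level sanity check, assuming the function above is in scope; the tensors are random stand-ins for model outputs:

import torch

logits = torch.randn(2, 5, 10)                    # (batch, seq_len, vocab)
labels = torch.randint(0, 10, (2, 5))
print(logprobs_from_logits(logits, labels).shape)                # torch.Size([2, 5])
print(logprobs_from_logits(logits, labels, gather=False).shape)  # torch.Size([2, 5, 10])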
Whiten values.
def whiten(values: torch.Tensor, shift_mean: bool = True) -> torch.Tensor: """Whiten values.""" mean, var = torch.mean(values), torch.var(values) whitened = (values - mean) * torch.rsqrt(var + 1e-8) if not shift_mean: whitened += mean return whitened
Compute mean of tensor with masked values.
def masked_mean(values: torch.Tensor, mask: torch.Tensor, axis: Optional[int] = None) -> torch.Tensor: """Compute mean of tensor with masked values.""" if axis is not None: return (values * mask).sum(axis=axis) / mask.sum(axis=axis) else: return (values * mask).sum() / mask.sum()
Compute variance of tensor with masked values.
def masked_var(values: torch.Tensor, mask: torch.Tensor, unbiased: bool = True) -> torch.Tensor: """Compute variance of tensor with masked values.""" mean = masked_mean(values, mask) centered_values = values - mean variance = masked_mean(centered_values**2, mask) if unbiased: mask_sum = mask.sum() if mask_sum == 0: raise ValueError( "The sum of the mask is zero, which can happen when `mini_batch_size=1`;" "try increase the `mini_batch_size` or `gradient_accumulation_steps`" ) # note that if mask_sum == 1, then there is a division by zero issue # to avoid it you just need to use a larger minibatch_size bessel_correction = mask_sum / (mask_sum - 1) variance = variance * bessel_correction return variance
Whiten values with masked values.
def masked_whiten(values: torch.Tensor, mask: torch.Tensor, shift_mean: bool = True) -> torch.Tensor: """Whiten values with masked values.""" mean, var = masked_mean(values, mask), masked_var(values, mask) whitened = (values - mean) * torch.rsqrt(var + 1e-8) if not shift_mean: whitened += mean return whitened
Tensor extension to torch.clamp https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713
def clip_by_value(x: torch.Tensor, tensor_min: float, tensor_max: float) -> torch.Tensor: """ Tensor extension to torch.clamp https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713 """ clipped = torch.max(torch.min(x, tensor_max), tensor_min) return clipped
Calculate entropy from logits.
def entropy_from_logits(logits: torch.Tensor) -> torch.Tensor: """Calculate entropy from logits.""" pd = torch.nn.functional.softmax(logits, dim=-1) entropy = torch.logsumexp(logits, axis=-1) - torch.sum(pd * logits, axis=-1) return entropy
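A quick sanity check, assuming the function above is in scope: zero logits describe a uniform distribution, whose entropy is ln(V):

import torch

logits = torch.zeros(1, 8)           # uniform distribution over 8 tokens
print(entropy_from_logits(logits))   # tensor([2.0794]) ~= ln(8)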
Average values of a list of dicts with torch tensors.
def average_torch_dicts(list_of_dicts: List[Dict]) -> Dict: """Average values of a list of dicts with torch tensors.""" average_dict = dict() for key in list_of_dicts[0].keys(): average_dict[key] = torch.mean(torch.stack([d[key] for d in list_of_dicts]), axis=0) return average_dict
Cast all torch.tensors in dict to numpy arrays.
def stats_to_np(stats_dict: Dict) -> Dict: """Cast all torch.tensors in dict to numpy arrays.""" new_dict = dict() for k, v in stats_dict.items(): if isinstance(v, torch.Tensor): new_dict[k] = v.detach().cpu() if new_dict[k].dtype == torch.bfloat16: new_dict[k] = new_dict[k].float() new_dict[k] = new_dict[k].numpy() else: new_dict[k] = v if np.isscalar(new_dict[k]): new_dict[k] = float(new_dict[k]) return new_dict
Sample text from language model.
def respond_to_batch( model: nn.Module, queries: List[torch.LongTensor], txt_len: int = 20, top_k: int = 0, top_p: float = 1.0 ) -> torch.LongTensor: """Sample text from language model.""" input_ids = queries for _i in range(txt_len): # Get Logits outputs = model(input_ids) next_token_logits = outputs[0][:, -1, :] next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) # Sample probs = F.softmax(next_token_logits, dim=-1) next_token = torch.multinomial(probs, num_samples=1).squeeze(1) input_ids = torch.cat([input_ids, next_token.unsqueeze(-1)], dim=-1) return input_ids[:, -txt_len:]
Helper function for reproducible behavior to set the seed in `random`, `numpy`, and `torch`. Args: seed (`int`): The seed to set.
def set_seed(seed: int) -> None: """ Helper function for reproducible behavior to set the seed in `random`, `numpy`, and `torch`. Args: seed (`int`): The seed to set. """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if is_xpu_available(): torch.xpu.manual_seed_all(seed) elif is_npu_available(): torch.npu.manual_seed_all(seed) else: torch.cuda.manual_seed_all(seed)
A helper function to create random tensors on the desired `device` with the desired `dtype`. When passing a list of generators, you can seed each sample in the batch individually. If CPU generators are passed, the tensor is always created on the CPU.
def randn_tensor( shape: Union[Tuple, List], generator: Optional[Union[List[torch.Generator], torch.Generator]] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, layout: Optional[torch.layout] = None, ) -> torch.Tensor: """A helper function to create random tensors on the desired `device` with the desired `dtype`. When passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor is always created on the CPU. """ # device on which tensor is created defaults to device rand_device = device batch_size = shape[0] layout = layout or torch.strided device = device or torch.device("cpu") if generator is not None: gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type if gen_device_type != device.type and gen_device_type == "cpu": rand_device = "cpu" if device != "mps": warnings.warn( f"The passed generator was created on 'cpu' even though a tensor on {device} was expected." f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably" f" slighly speed up this function by passing a generator that was created on the {device} device." ) elif gen_device_type != device.type and gen_device_type == "cuda": raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.") # make sure generator list of length 1 is treated like a non-list if isinstance(generator, list) and len(generator) == 1: generator = generator[0] if isinstance(generator, list): shape = (1,) + shape[1:] latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device) return latents
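A usage sketch, assuming the helper above is in scope; passing a list of generators seeds each sample in the batch separately, and the shape is an illustrative assumption:

import torch

generators = [torch.Generator().manual_seed(i) for i in range(4)]
# One generator per batch element; latents land on the default CPU device here.
latents = randn_tensor((4, 3, 8, 8), generator=generators)
print(latents.shape)  # torch.Size([4, 3, 8, 8])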
Checks if `torch_npu` is installed and potentially if a NPU is in the environment
def is_npu_available() -> bool:
    """Checks if `torch_npu` is installed and potentially if an NPU is in the environment."""
    if find_spec("torch") is None or find_spec("torch_npu") is None:
        return False

    import torch
    import torch_npu  # noqa: F401

    return hasattr(torch, "npu") and torch.npu.is_available()
Perform zero-verbose init - use this method on top of the CLI modules to keep logging output to a minimum (errors only) and route warnings through the logger.
def init_zero_verbose():
    """
    Perform zero-verbose init - use this method on top of the CLI modules to keep logging output to a minimum
    (errors only) and route warnings through the logger.
    """
    import logging
    import warnings

    from rich.logging import RichHandler

    FORMAT = "%(message)s"
    logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()], level=logging.ERROR)

    # Custom warning handler to redirect warnings to the logging system
    def warning_handler(message, category, filename, lineno, file=None, line=None):
        logging.warning(f"{filename}:{lineno}: {category.__name__}: {message}")

    # Add the custom warning handler - we need to do that before importing anything to make sure the loggers work well
    warnings.showwarning = warning_handler
Return a callable function that takes a "messages" dataset and returns a formatted dataset by applying the tokenizer's chat template to each example.
def conversations_formatting_function(tokenizer: AutoTokenizer, messages_field: Literal["messages", "conversations"]):
    r"""
    Return a callable function that takes a "messages" dataset and returns a formatted dataset by applying the
    tokenizer's chat template to each example.
    """

    def format_dataset(examples):
        if isinstance(examples[messages_field][0], list):
            output_texts = []
            for i in range(len(examples[messages_field])):
                output_texts.append(tokenizer.apply_chat_template(examples[messages_field][i], tokenize=False))
            return output_texts
        else:
            return tokenizer.apply_chat_template(examples[messages_field], tokenize=False)

    return format_dataset
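A hedged sketch of the returned formatter on a single ChatML-style record (the checkpoint is an assumption; any tokenizer that ships a chat template works the same way):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")  # assumed chat-capable checkpoint
fmt = conversations_formatting_function(tokenizer, "messages")

example = {"messages": [{"role": "user", "content": "Hi!"}, {"role": "assistant", "content": "Hello."}]}
print(fmt(example))  # the conversation rendered with the tokenizer's chat template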
Return a callable function that takes an "instructions" (prompt/completion) dataset and returns a formatted dataset by applying the tokenizer's chat template to each example.
def instructions_formatting_function(tokenizer: AutoTokenizer):
    r"""
    Return a callable function that takes an "instructions" (prompt/completion) dataset and returns a formatted
    dataset by applying the tokenizer's chat template to each example.
    """

    def format_dataset(examples):
        if isinstance(examples["prompt"], list):
            output_texts = []
            for i in range(len(examples["prompt"])):
                converted_sample = [
                    {"role": "user", "content": examples["prompt"][i]},
                    {"role": "assistant", "content": examples["completion"][i]},
                ]
                output_texts.append(tokenizer.apply_chat_template(converted_sample, tokenize=False))
            return output_texts
        else:
            converted_sample = [
                {"role": "user", "content": examples["prompt"]},
                {"role": "assistant", "content": examples["completion"]},
            ]
            return tokenizer.apply_chat_template(converted_sample, tokenize=False)

    return format_dataset
Finds the correct formatting function based on the dataset structure. Currently supported datasets are: - `ChatML` with [{"role": str, "content": str}] - `instruction` with [{"prompt": str, "completion": str}] Args: dataset (Dataset): User dataset tokenizer (AutoTokenizer): Tokenizer used for formatting Returns: Callable: Formatting function if the dataset format is supported else None
def get_formatting_func_from_dataset( dataset: Union[Dataset, ConstantLengthDataset], tokenizer: AutoTokenizer ) -> Optional[Callable]: r""" Finds the correct formatting function based on the dataset structure. Currently supported datasets are: - `ChatML` with [{"role": str, "content": str}] - `instruction` with [{"prompt": str, "completion": str}] Args: dataset (Dataset): User dataset tokenizer (AutoTokenizer): Tokenizer used for formatting Returns: Callable: Formatting function if the dataset format is supported else None """ if isinstance(dataset, Dataset): if "messages" in dataset.features: if dataset.features["messages"] == FORMAT_MAPPING["chatml"]: logging.info("Formatting dataset with chatml format") return conversations_formatting_function(tokenizer, "messages") if "conversations" in dataset.features: if dataset.features["conversations"] == FORMAT_MAPPING["chatml"]: logging.info("Formatting dataset with chatml format") return conversations_formatting_function(tokenizer, "conversations") elif dataset.features == FORMAT_MAPPING["instruction"]: logging.info("Formatting dataset with instruction format") return instructions_formatting_function(tokenizer) return None
Creates a static reference copy of a model. Note that model will be in `.eval()` mode. Args: model (`PreTrainedModelWrapper`): The model to be copied. num_shared_layers (`int`, *optional*): The number of initial layers that are shared between both models and kept frozen. pattern (`str`, *optional*): The shared layers are selected with a string pattern (e.g. "transformer.h.{layer}" for GPT2) and if a custom pattern is necessary it can be passed here. Returns `PreTrainedModelWrapper`
def create_reference_model( model: PreTrainedModelWrapper, num_shared_layers: Optional[int] = None, pattern: Optional[str] = None ) -> PreTrainedModelWrapper: """ Creates a static reference copy of a model. Note that model will be in `.eval()` mode. Args: model (`PreTrainedModelWrapper`): The model to be copied. num_shared_layers (`int`, *optional*): The number of initial layers that are shared between both models and kept frozen. pattern (`str`, *optional*): The shared layers are selected with a string pattern (e.g. "transformer.h.{layer}" for GPT2) and if a custom pattern is necessary it can be passed here. Returns `PreTrainedModelWrapper` """ if is_deepspeed_zero3_enabled(): raise ValueError( "DeepSpeed ZeRO-3 is enabled and is not compatible with `create_reference_model()`. Please instantiate your reference model directly with `AutoCausalLM.from_pretrained()`." ) parameter_names = [n for n, _ in model.named_parameters()] ref_model = deepcopy(model) # if no layers are shared, return copy of model if num_shared_layers is None: for param_name in parameter_names: param = ref_model.get_parameter(param_name) param.requires_grad = False return ref_model.eval() # identify layer name pattern if pattern is not None: pattern = pattern.format(layer=num_shared_layers) else: for pattern_candidate in LAYER_PATTERNS: pattern_candidate = pattern_candidate.format(layer=num_shared_layers) if any(pattern_candidate in name for name in parameter_names): pattern = pattern_candidate break if pattern is None: raise ValueError("Layer pattern could not be matched.") # divide parameters in shared and unshared parameter lists shared_param_list = [] unshared_param_list = [] shared_parameter = True for name, _param in model.named_parameters(): if pattern in name: shared_parameter = False if shared_parameter: shared_param_list.append(name) else: unshared_param_list.append(name) # create reference of the original parameter if they are shared for param_name in shared_param_list: param = model.get_parameter(param_name) param.requires_grad = False _ref_param = ref_model.get_parameter(param_name) # for all other parameters just make sure they don't use gradients for param_name in unshared_param_list: param = ref_model.get_parameter(param_name) param.requires_grad = False if pattern is not None and len(unshared_param_list) == 0: logging.warning("Pattern passed or found, but no layers matched in the model. Check for a typo.") return ref_model.eval()
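A hedged usage sketch (the model class and "gpt2" checkpoint follow typical TRL usage and are assumptions, not taken from the source):

from trl import AutoModelForCausalLMWithValueHead

model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")  # assumed checkpoint
ref_model = create_reference_model(model)  # fully frozen copy
ref_model_shared = create_reference_model(model, num_shared_layers=6)  # share the first 6 transformer blocks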
As opposed to the default direction of broadcasting (right to left), this function broadcasts from left to right Args: input_tensor (`torch.FloatTensor`): is the tensor to broadcast shape (`Tuple[int]`): is the shape to broadcast to
def _left_broadcast(input_tensor, shape):
    """
    As opposed to the default direction of broadcasting (right to left), this function broadcasts
    from left to right.

    Args:
        input_tensor (`torch.FloatTensor`): is the tensor to broadcast
        shape (`Tuple[int]`): is the shape to broadcast to
    """
    input_ndim = input_tensor.ndim
    if input_ndim > len(shape):
        raise ValueError(
            "The number of dimensions of the tensor to broadcast cannot be greater than the length of the shape to broadcast to"
        )
    return input_tensor.reshape(input_tensor.shape + (1,) * (len(shape) - input_ndim)).broadcast_to(shape)
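A small sketch contrasting left-aligned broadcasting with PyTorch's default right-aligned broadcasting:

import torch

t = torch.tensor([1.0, 2.0])         # shape (2,)
out = _left_broadcast(t, (2, 3, 4))  # the existing dim is aligned on the left -> shape (2, 3, 4)
print(out.shape)
# torch.broadcast_to(t, (2, 3, 4)) would fail, because default broadcasting aligns (2,) with the last dim (4).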
Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. eta (`float`): weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would coincide with the one provided as input and `use_clipped_model_output` will have no effect. generator: random number generator. variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we can directly provide the noise for the variance itself. This is useful for methods such as CycleDiffusion. (https://arxiv.org/abs/2210.05559) Returns: `DDPOSchedulerOutput`: the predicted sample at the previous timestep and the log probability of the sample
def scheduler_step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, generator=None, prev_sample: Optional[torch.FloatTensor] = None, ) -> DDPOSchedulerOutput: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. eta (`float`): weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would coincide with the one provided as input and `use_clipped_model_output` will have not effect. generator: random number generator. variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we can directly provide the noise for the variance itself. This is useful for methods such as CycleDiffusion. (https://arxiv.org/abs/2210.05559) Returns: `DDPOSchedulerOutput`: the predicted sample at the previous timestep and the log probability of the sample """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps # to prevent OOB on gather prev_timestep = torch.clamp(prev_timestep, 0, self.config.num_train_timesteps - 1) # 2. compute alphas, betas alpha_prod_t = self.alphas_cumprod.gather(0, timestep.cpu()) alpha_prod_t_prev = torch.where( prev_timestep.cpu() >= 0, self.alphas_cumprod.gather(0, prev_timestep.cpu()), self.final_alpha_cumprod, ) alpha_prod_t = _left_broadcast(alpha_prod_t, sample.shape).to(sample.device) alpha_prod_t_prev = _left_broadcast(alpha_prod_t_prev, sample.shape).to(sample.device) beta_prod_t = 1 - alpha_prod_t # 3. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) pred_epsilon = model_output elif self.config.prediction_type == "sample": pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = _get_variance(self, timestep, prev_timestep) std_dev_t = eta * variance ** (0.5) std_dev_t = _left_broadcast(std_dev_t, sample.shape).to(sample.device) if use_clipped_model_output: # the pred_epsilon is always re-derived from the clipped x_0 in Glide pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf prev_sample_mean = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction if prev_sample is not None and generator is not None: raise ValueError( "Cannot pass both generator and prev_sample. Please make sure that either `generator` or" " `prev_sample` stays `None`." ) if prev_sample is None: variance_noise = randn_tensor( model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype, ) prev_sample = prev_sample_mean + std_dev_t * variance_noise # log prob of prev_sample given prev_sample_mean and std_dev_t log_prob = ( -((prev_sample.detach() - prev_sample_mean) ** 2) / (2 * (std_dev_t**2)) - torch.log(std_dev_t) - torch.log(torch.sqrt(2 * torch.as_tensor(np.pi))) ) # mean along all but batch dimension log_prob = log_prob.mean(dim=tuple(range(1, log_prob.ndim))) return DDPOSchedulerOutput(prev_sample.type(sample.dtype), log_prob)
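As a sanity check on the log-probability term at the end of `scheduler_step`, the expression is the standard Gaussian log-density of `prev_sample` under mean `prev_sample_mean` and scale `std_dev_t`. A standalone sketch with illustrative shapes, compared against `torch.distributions.Normal`:

import torch
from torch.distributions import Normal

mean = torch.zeros(2, 3)
std = torch.full((2, 3), 0.5)
x = torch.randn(2, 3)

# Same formula as in scheduler_step: -(x - mu)^2 / (2 sigma^2) - log(sigma) - log(sqrt(2 pi))
manual = -((x - mean) ** 2) / (2 * std**2) - torch.log(std) - torch.log(torch.sqrt(2 * torch.as_tensor(torch.pi)))
reference = Normal(mean, std).log_prob(x)
assert torch.allclose(manual, reference, atol=1e-6)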
Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. 
cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_rescale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. Examples: Returns: `DDPOPipelineOutput`: The generated image, the predicted latents used to generate the image and the associated log probabilities
def pipeline_step( self, prompt: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. 
output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). guidance_rescale (`float`, *optional*, defaults to 0.7): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. Examples: Returns: `DDPOPipelineOutput`: The generated image, the predicted latents used to generate the image and the associated log probabilities """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_encoder_lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order all_latents = [latents] all_log_probs = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 scheduler_output = scheduler_step(self.scheduler, noise_pred, t, latents, eta) latents = scheduler_output.latents log_prob = scheduler_output.log_probs all_latents.append(latents) all_log_probs.append(log_prob) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() return DDPOPipelineOutput(image, all_latents, all_log_probs)
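A heavily hedged usage sketch of `pipeline_step`. The checkpoint, device, and prompt are assumptions, the function expects a Stable-Diffusion-style pipeline as its first argument, and exact behavior can depend on the installed diffusers version:

import torch
from diffusers import DDIMScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16  # assumed checkpoint
).to("cuda")
# scheduler_step above assumes a DDIM-style scheduler, so swap the default one in.
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

output = pipeline_step(pipe, prompt="a photo of a cat", num_inference_steps=20, eta=1.0)
# `output` bundles the decoded images plus the per-step latents and log probabilities collected in the loop above.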
Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. mapping (`dict[str, str]`): The mapping to use for conversion, the mapping should be a dictionary with the following structure: - key: the pattern to replace - value: the pattern to replace with Returns: converted_state_dict (`dict`) The converted state dict.
def convert_state_dict(state_dict, mapping):
    r"""
    Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values.

    Args:
        state_dict (`dict[str, torch.Tensor]`):
            The state dict to convert.
        mapping (`dict[str, str]`):
            The mapping to use for conversion, the mapping should be a dictionary with the following structure:
                - key: the pattern to replace
                - value: the pattern to replace with

    Returns:
        converted_state_dict (`dict`)
            The converted state dict.
    """
    converted_state_dict = {}
    for k, v in state_dict.items():
        # First, filter out the keys that we always want to replace
        for pattern in KEYS_TO_ALWAYS_REPLACE.keys():
            if pattern in k:
                new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern]
                k = k.replace(pattern, new_pattern)

        for pattern in mapping.keys():
            if pattern in k:
                new_pattern = mapping[pattern]
                k = k.replace(pattern, new_pattern)
                break
        converted_state_dict[k] = v

    return converted_state_dict
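A toy sketch of the key renaming (the mapping and key names are illustrative, not a real LoRA mapping):

import torch

state_dict = {"down_blocks.0.to_q_lora.up.weight": torch.zeros(4, 4)}
mapping = {"to_q_lora.up": "to_q.lora_B"}
print(list(convert_state_dict(state_dict, mapping).keys()))
# ['down_blocks.0.to_q.lora_B.weight'], assuming no KEYS_TO_ALWAYS_REPLACE pattern matches this key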
Converts a state dict to new diffusers format. The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or PEFT format (`PEFT`) or new diffusers format (`DIFFUSERS`). In the last case the method will return the state dict as is. The method only supports the conversion from diffusers old, PEFT to diffusers new for now. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. original_type (`StateDictType`, *optional*): The original type of the state dict, if not provided, the method will try to infer it automatically. kwargs (`dict`, *args*): Additional arguments to pass to the method. - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in `get_peft_model_state_dict` method: https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 but we add it here in case we don't want to rely on that method.
def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs): r""" Converts a state dict to new diffusers format. The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or PEFT format (`PEFT`) or new diffusers format (`DIFFUSERS`). In the last case the method will return the state dict as is. The method only supports the conversion from diffusers old, PEFT to diffusers new for now. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. original_type (`StateDictType`, *optional*): The original type of the state dict, if not provided, the method will try to infer it automatically. kwargs (`dict`, *args*): Additional arguments to pass to the method. - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in `get_peft_model_state_dict` method: https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 but we add it here in case we don't want to rely on that method. """ peft_adapter_name = kwargs.pop("adapter_name", None) if peft_adapter_name is not None: peft_adapter_name = "." + peft_adapter_name else: peft_adapter_name = "" if original_type is None: # Old diffusers to PEFT if any("to_out_lora" in k for k in state_dict.keys()): original_type = StateDictType.DIFFUSERS_OLD elif any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): original_type = StateDictType.PEFT elif any("lora_linear_layer" in k for k in state_dict.keys()): # nothing to do return state_dict else: raise ValueError("Could not automatically infer state dict type") if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys(): raise ValueError(f"Original type {original_type} is not supported") mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type] return convert_state_dict(state_dict, mapping)
Setup chat format by adding special tokens to the tokenizer, setting the correct format, and extending the embedding layer of the model based on the new special tokens. Args: model (`~transformers.PreTrainedModel`): The model to be modified. tokenizer (`~transformers.PreTrainedTokenizer`): The tokenizer to be modified. format (`Optional[Literal["chatml"]]`): The format to be set. Defaults to "chatml". resize_to_multiple_of (`Optional[int]`): Number to resize the embedding layer to. Defaults to None. Returns: model (`~transformers.PreTrainedModel`): The modified model. tokenizer (`~transformers.PreTrainedTokenizer`): The modified tokenizer.
def setup_chat_format( model: PreTrainedModel, tokenizer: PreTrainedTokenizer, format: Optional[Literal["chatml"]] = "chatml", resize_to_multiple_of: Optional[int] = None, ) -> Tuple[PreTrainedModel, PreTrainedTokenizer]: """ Setup chat format by adding special tokens to the tokenizer, setting the correct format, and extending the embedding layer of the model based on the new special tokens. Args: model (`~transformers.PreTrainedModel`): The model to be modified. tokenizer (`~transformers.PreTrainedTokenizer`): The tokenizer to be modified. format (`Optional[Literal["chatml"]]`): The format to be set. Defaults to "chatml". resize_to_multiple_of (`Optional[int]`): Number to resize the embedding layer to. Defaults to None. Returns: model (`~transformers.PreTrainedModel`): The modified model. tokenizer (`~transformers.PreTrainedTokenizer`): The modified tokenizer. """ # check if format available and retrieve if format not in FORMAT_MAPPING: raise ValueError(f"Format {format} not available. Please use one of {FORMAT_MAPPING.keys()}") chat_format = FORMAT_MAPPING[format]() # set special tokens and them tokenizer.eos_token = chat_format.eos_token tokenizer.pad_token = chat_format.pad_token tokenizer.bos_token = chat_format.bos_token tokenizer.add_special_tokens({"additional_special_tokens": [chat_format.bos_token, chat_format.eos_token]}) # set chat format for tokenizer tokenizer.chat_template = chat_format.chat_template # resize embedding layer to a multiple of 64, https://x.com/karpathy/status/1621578354024677377 model.resize_token_embeddings( len(tokenizer), pad_to_multiple_of=resize_to_multiple_of if resize_to_multiple_of is not None else None ) # Update the model config to use the new eos & bos tokens if getattr(model, "config", None) is not None: model.config.pad_token_id = tokenizer.pad_token_id model.config.bos_token_id = tokenizer.bos_token_id model.config.eos_token_id = tokenizer.eos_token_id # Update the generation config to use the new eos & bos token if getattr(model, "generation_config", None) is not None: model.generation_config.bos_token_id = tokenizer.bos_token_id model.generation_config.eos_token_id = tokenizer.eos_token_id model.generation_config.pad_token_id = tokenizer.pad_token_id return model, tokenizer
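A hedged usage sketch (the "gpt2" base checkpoint is an assumption; any causal LM without a chat template works the same way):

from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model, tokenizer = setup_chat_format(model, tokenizer, format="chatml", resize_to_multiple_of=64)

# The tokenizer now carries the ChatML template and special tokens, and the embeddings were resized to match.
print(tokenizer.apply_chat_template([{"role": "user", "content": "Hi!"}], tokenize=False))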
Removes the optimizer hooks from a DeepSpeed ZeRO-3 model.
def remove_hooks(model: "DeepSpeedEngine") -> None:
    """Removes the optimizer hooks from a DeepSpeed ZeRO-3 model."""
    if model.optimizer is not None and hasattr(model.optimizer, "parameter_offload"):
        optimizer_offload = model.optimizer.parameter_offload
    elif model.optimizer is not None:
        optimizer_offload = model.optimizer

    for hook in optimizer_offload.forward_hooks:
        hook.remove()
    for hook in optimizer_offload.backward_hooks:
        hook.remove()

    optimizer_offload.forward_hooks = []
    optimizer_offload.backward_hooks = []