response | instruction
---|---|
Check that we don't choke on non-contiguous tensors | def test_no_contiguous(dtype):
"""Check that we don't choke on non-contiguous tensors"""
shape = (8, 384, 128)
# Get the same inputs
torch.random.manual_seed(0)
torch.cuda.manual_seed(0)
X = torch.normal(0, 1, size=shape, device="cuda", requires_grad=True, dtype=dtype)
X = X.transpose(2, 1).contiguous().transpose(2, 1)
assert not X.is_contiguous()
triton_layernorm = FusedLayerNorm(X.shape[-1]).to(device="cuda", dtype=dtype)
_ = triton_layernorm(X) |
Check that PyTorch and Triton softmax give the same result | def test_softmax_parity(shape, amp, log, masking, causal, contiguous):
"""Check that PyTorch and Triton softmax give the same result"""
torch.random.manual_seed(0)
# Check the result of a FW pass
X = torch.normal(0, 1, size=shape, device="cuda", requires_grad=False)
if not contiguous:
# Make sure that the buffer is not contiguous
X = X.transpose(-2, -1).contiguous().transpose(-2, -1)
X_ = X.clone()
X.requires_grad = True
X_.requires_grad = True
seq = shape[-1]
mask = torch.zeros((seq, seq)).cuda()
if masking:
mask[torch.rand((seq, seq)) > 0.8] = -float("inf")
mask_triton = mask.clone() if masking else None
if causal:
mask[~torch.tril(torch.ones_like(mask)).bool()] = -float("inf")
with autocast(enabled=amp):
y_torch = (
torch.log_softmax(X + mask, dim=-1)
if log
else torch.softmax(X + mask, dim=-1)
)
y_triton = (
triton_log_softmax(X_, mask_triton, causal)
if log
else triton_softmax(X_, mask_triton, causal)
)
assert torch.allclose(y_torch, y_triton, equal_nan=True)
# Check that BW also gives the same result
loss_torch = torch.norm(y_torch.transpose(-2, -1) @ y_torch)
loss_torch.backward()
loss_triton = torch.norm(y_triton.transpose(-2, -1) @ y_triton)
loss_triton.backward()
assert torch.allclose(
torch.norm(X.grad), torch.norm(X_.grad), equal_nan=True, atol=1e-5
), f"{torch.norm(X.grad)}, {torch.norm(X_.grad)}" |
Check that the fallback paths are correct | def test_softmax_parity_fallback(log, masking, causal, contiguous, device):
"""Check that the fallback paths are correct"""
torch.random.manual_seed(0)
shape = (16, 16)
# Check the result of a FW pass
X = torch.normal(0, 1, size=shape, device=device, requires_grad=False)
if not contiguous:
# Make sure that the buffer is not contiguous
X = X.transpose(-2, -1).contiguous().transpose(-2, -1)
X_ = X.clone()
X.requires_grad = True
X_.requires_grad = True
seq = shape[1]
mask = torch.zeros((seq, seq), device=device)
if masking:
mask[torch.rand((seq, seq), device=device) > 0.8] = -float("inf")
mask_causal = torch.zeros_like(mask)
if causal:
mask_causal[~torch.tril(torch.ones_like(mask)).bool()] = -float("inf")
y_torch = (
torch.log_softmax(X + mask + mask_causal, dim=-1)
if log
else torch.softmax(X + mask + mask_causal, dim=-1)
)
y_triton = (
triton_log_softmax(X_, mask, causal)
if log
else triton_softmax(X_, mask, causal)
)
assert torch.allclose(y_torch, y_triton, equal_nan=True)
# Check that BW also gives the same result
loss_torch = torch.norm(y_torch.transpose(-2, -1) @ y_torch)
loss_torch.backward()
loss_triton = torch.norm(y_triton.transpose(-2, -1) @ y_triton)
loss_triton.backward()
assert torch.allclose(
torch.norm(X.grad), torch.norm(X_.grad), equal_nan=True, atol=1e-5
), f"{torch.norm(X.grad)}, {torch.norm(X_.grad)}" |
Create block tables and paged K/V cache for testing paged attention.
Args:
cache_k, cache_v: K/V caches, each of shape [B, MAX_T, H_kv, D].
Note that these tensors are unexpanded,
i.e. for multiquery case cache_k.shape[2] = 1
kv_seqlens: list of K/V sequence lengths
BLOCK_N: number of tokens per paged attention block
B: batch size (inferred from cache_k, not passed separately)
Returns:
block_tables: [B, MAX_BLOCKS]
packed_cache_k: [1, total_len_rounded, H_kv, D]
packed_cache_v: [1, total_len_rounded, H_kv, D]
where total_len_rounded is a sum of K/V seqlens, each rounded up
to a multiple of BLOCK_N. | def pack_kv_cache(
cache_k: torch.Tensor,
cache_v: torch.Tensor,
kv_seqlens: List[int],
BLOCK_N: int,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Create block tables and paged K/V cache for testing paged attention.
Args:
cache_k, cache_v: K/V caches, each of shape [B, MAX_T, H_kv, D].
Note that these tensors are unexpanded,
i.e. for multiquery case cache_k.shape[2] = 1
kv_seqlens: list of K/V sequence lengths
BLOCK_N: number of tokens per paged attention block
B: batch size (inferred from cache_k, not passed separately)
Returns:
block_tables: [B, MAX_BLOCKS]
packed_cache_k: [1, total_len_rounded, H_kv, D]
packed_cache_v: [1, total_len_rounded, H_kv, D]
where total_len_rounded is a sum of K/V seqlens, each rounded up
to a multiple of BLOCK_N.
"""
kv_seqlens_rounded = [(x + BLOCK_N - 1) // BLOCK_N * BLOCK_N for x in kv_seqlens]
total_len_rounded = sum(kv_seqlens_rounded)
B, MAX_T, H, D = cache_k.shape
packed_cache_k = torch.empty(
total_len_rounded, H, D, device=cache_k.device, dtype=cache_k.dtype
)
packed_cache_v = torch.empty(
total_len_rounded, H, D, device=cache_k.device, dtype=cache_k.dtype
)
seqstart = 0
for b in range(B):
packed_cache_k[seqstart : seqstart + kv_seqlens[b]] = cache_k[
b, : kv_seqlens[b]
].clone()
packed_cache_v[seqstart : seqstart + kv_seqlens[b]] = cache_v[
b, : kv_seqlens[b]
].clone()
seqstart += kv_seqlens_rounded[b]
num_blocks_per_row = (MAX_T + BLOCK_N - 1) // BLOCK_N
block_tables = (
torch.arange(num_blocks_per_row, device="cuda", dtype=torch.int32)
.unsqueeze(0)
.expand(B, num_blocks_per_row)
)
seqstarts = (
(
torch.tensor(kv_seqlens_rounded).cumsum(dim=0)
- torch.tensor(kv_seqlens_rounded)
)
.to(device="cuda")
.unsqueeze(1)
) // BLOCK_N
block_tables = (block_tables + seqstarts).contiguous().to(dtype=torch.int32)
return (
block_tables,
packed_cache_k.unsqueeze(0),
packed_cache_v.unsqueeze(0),
) |
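A minimal usage sketch for `pack_kv_cache` above; the shapes, BLOCK_N and the CUDA device are illustrative assumptions, not part of the original code:
import torch

B, MAX_T, H_kv, D = 2, 32, 1, 16
BLOCK_N = 16
cache_k = torch.randn(B, MAX_T, H_kv, D, device="cuda", dtype=torch.float16)
cache_v = torch.randn_like(cache_k)
kv_seqlens = [20, 9]  # actual K/V length of each batch element

block_tables, packed_k, packed_v = pack_kv_cache(cache_k, cache_v, kv_seqlens, BLOCK_N)
# each length is rounded up to a multiple of BLOCK_N: 32 + 16 = 48 tokens in total
assert packed_k.shape == (1, 48, H_kv, D)
assert block_tables.shape == (B, MAX_T // BLOCK_N)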
Benchmark the runtime of the provided function.
Args:
fn: Function to benchmark
rep: Repetition time (in ms)
grad_to_none: Reset the gradient of the provided tensor to None
Returns:
Benchmarked runtime in ms | def do_bench_cudagraph(
fn: Callable, rep: int = 20, grad_to_none: Optional[List[torch.Tensor]] = None
) -> float:
"""
Benchmark the runtime of the provided function.
Args:
fn: Function to benchmark
rep: Repetition time (in ms)
grad_to_none: Reset the gradient of the provided tensor to None
Returns:
Benchmarked runtime in ms
"""
if torch.cuda.current_stream() == torch.cuda.default_stream():
raise RuntimeError(
"Cannot capture graph in default stream. "
"Please use side stream in benchmark code."
)
# warmup
fn()
# step 1 - we estimate the amount of time the kernel call takes
# NOTE: this estimate isn't super accurate because the GPU isn't warmed up at this point
# but it is probably good enough
if grad_to_none is not None:
for x in grad_to_none:
x.detach_()
x.requires_grad_(True)
x.grad = None
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
fn()
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
g.replay()
end_event.record()
torch.cuda.synchronize()
estimate_ms = start_event.elapsed_time(end_event)
n_repeat = max(1, int(rep / estimate_ms))
# step 2 - construct a cuda graph with `n_repeat` unrolled function calls to minimize
# host overhead
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
for i in range(n_repeat):
if grad_to_none is not None:
for x in grad_to_none:
x.grad = None
fn()
torch.cuda.synchronize()
# measure time and return
ret = []
n_retries = 10
for i in range(n_retries):
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
g.replay()
end_event.record()
torch.cuda.synchronize()
ret += [start_event.elapsed_time(end_event) / n_repeat]
return torch.mean(torch.tensor(ret)).item() |
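A short usage sketch for `do_bench_cudagraph`; since the function refuses to capture on the default stream, the call is wrapped in a side stream (tensor sizes are arbitrary):
import torch

a = torch.randn(4096, 4096, device="cuda")
b = torch.randn(4096, 4096, device="cuda")

stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
    # the lambda is captured into a CUDA graph and replayed n_repeat times
    ms = do_bench_cudagraph(lambda: a @ b, rep=20)
print(f"matmul: {ms:.3f} ms")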
Generates lists of lengths of query blocks and corresponding key blocks.
The total number of queries will be bs * q_len and the
total number of keys will be bs * kv_len.
max_q_minus_k: maximum allowed num_queries - num_keys.
For "bottom-right" masks it's 0, we need to have more keys than
queries, otherwise some queries have no keys to attend to.
For BlockDiagonalCausalMask it's None, there is no constraint
on num_queries - num_keys.
For BlockDiagonalCausalLocalAttentionMask it's equal
to the window size. | def _rand_seqlens(
r: random.Random,
bs: int,
q_len: int,
kv_len: int,
max_q_minus_k: Optional[int],
) -> Tuple[Sequence[int], Sequence[int]]:
"""
Generates lists of lengths of query blocks and corresponding key blocks.
The total number of queries will be bs * q_len and the
total number of keys will be bs * kv_len.
max_q_minus_k: maximum allowed num_queries - num_keys.
For "bottom-right" masks it's 0, we need to have more keys than
queries, otherwise some queries have no keys to attend to.
For BlockDiagonalCausalMask it's None, there is no constraint
on num_queries - num_keys.
For BlockDiagonalCausalLocalAttentionMask it's equal
to the window size.
"""
if max_q_minus_k == 0:
# In case max_q_minus_k > 0 the exact condition is
# kv_len >= q_len - max_q_minus_k * batch_size,
# but we can't check it without knowing the actual batch size,
# which is determined in the loop below.
assert kv_len >= q_len
q_len *= bs
kv_len *= bs
seqlens_q: List[int] = []
seqlens_k: List[int] = []
step_q = [max(1, q_len // 10), max(2, q_len // 2)]
step_k = [max(1, kv_len // 10), max(2, kv_len // 2)]
while sum(seqlens_q) < q_len and sum(seqlens_k) < kv_len:
if max_q_minus_k is None:
# Simple case - no constraint on the number of queries and keys.
num_queries = r.randrange(*step_q)
seqlens_q.append(num_queries)
seqlens_k.append(r.randrange(*step_k))
else:
# In this case we need to make sure num_queries - num_keys < max_q_minus_k holds for every batch element.
# To do this, when choosing num_queries and num_keys at a given step,
# we ensure two conditions are satisfied:
# 1) num_queries <= num_keys + max_q_minus_k for the current batch element
# 2) Same holds for the remaining keys and queries, i.e.
# queries_left - num_queries <= keys_left - num_keys + max_q_minus_k
keys_left = kv_len - sum(seqlens_k, 0)
queries_left = q_len - sum(seqlens_q, 0)
assert (
keys_left >= queries_left - max_q_minus_k
), f"{keys_left=} {queries_left=} {max_q_minus_k=} {kv_len=} {q_len=} {seqlens_k=} {seqlens_q=}"
# Limit num_queries from above: if num_queries > keys_left + max_q_minus_k,
# condition num_queries <= num_keys + max_q_minus_k can't be satisfied even if we take
# all the remaining keys
max_queries_to_take = min(queries_left, keys_left + max_q_minus_k)
num_queries = r.randrange(1, max_queries_to_take + 1)
seqlens_q.append(num_queries)
# Now we know num_queries, let's select num_keys.
# How many keys can we use for the current batch element so that
# for the remaining keys and values the constraint
# num_queries - num_keys < max_q_minus_k holds on the next step?
extra_keys_available = keys_left - queries_left + max_q_minus_k + 1
assert extra_keys_available >= 0
if extra_keys_available > 0:
seqlens_k.append(num_queries + r.randrange(0, extra_keys_available))
else:
seqlens_k.append(num_queries)
seqlens_q[-1] = q_len - sum(seqlens_q[:-1])
seqlens_k[-1] = kv_len - sum(seqlens_k[:-1])
return seqlens_q, seqlens_k |
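An illustrative call to this (private) helper; with max_q_minus_k=0 the totals match bs * q_len and bs * kv_len, and every block should get at least as many keys as queries:
import random

r = random.Random(0)
seqlens_q, seqlens_k = _rand_seqlens(r, bs=4, q_len=128, kv_len=128, max_q_minus_k=0)
assert sum(seqlens_q) == 4 * 128 and sum(seqlens_k) == 4 * 128
assert all(q <= k for q, k in zip(seqlens_q, seqlens_k))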
Returns the list of operators used inside `function` with
*args and **kwargs | def list_operators(function, *args, **kwargs):
"""
Returns the list of operators used inside `function` with
*args and **kwargs
"""
verbose_mode = VerboseTorchDispatchMode()
with verbose_mode:
function(*args, **kwargs)
return verbose_mode.operators |
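A hedged usage example for `list_operators`; the exact entries depend on the dispatcher trace and the installed version:
import torch

def f(x):
    return torch.relu(x) + x.sum()

ops = list_operators(f, torch.randn(8, 8))
print(ops)  # expected to include entries for aten.relu, aten.sum and aten.add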
An activation checkpoint context_fn for selectively deciding what to
store and what to recompute. Accepts a custom policy.
Args:
policy_fn(Union[List[Op], callable]): policy for deciding what to
store (instead of recompute). If it's a function, it should
be of form (func, *args, **kwargs) -> bool which indicates
if func outputs with *args and **kwargs should be stored or not.
Additionally, a list[Op] is also supported for easier cases.
The op should be in the format `torch.ops.***`, where the `***`
names of operators can be obtained with `list_operators`. | def selective_checkpoint_context_fn(policy_fn=None):
"""An activation checkpoint context_fn for selectively deciding what to
store and what to recompute. Accepts a custom policy.
Args:
policy_fn(Union[List[Op], callable]): policy for deciding what to
store (instead of recompute). If it's a function, it should
be of form (func, *args, **kwargs) -> bool which indicates
if func outputs with *args and **kwargs should be stored or not.
Additionally, a list[Op] is also supported for easier cases.
The op should be in the format `torch.ops.***`, where the `***`
names of operators can be obtained with `list_operators`.
"""
if policy_fn is None:
policy_fn = _get_default_policy()
elif isinstance(policy_fn, list):
policy_fn = _get_default_policy(policy_fn)
else:
assert callable(policy_fn), "policy_fn should be None, list or a callable"
temp_storage: Dict[Any, List[Any]] = defaultdict(list)
# assumption: grad_mode doesn't change inside function
caching_mode: ContextManager[None]
if torch.is_grad_enabled():
caching_mode = _CachingTorchDispatchMode(deepcopy(policy_fn), temp_storage)
else:
caching_mode = NullTorchDispatchMode()
cached_mode = CachedTorchDispatchMode(deepcopy(policy_fn), temp_storage)
return caching_mode, cached_mode |
Wrapper around torch.utils.checkpoint that accepts a custom policy
function for selectively deciding what to store and what to recompute
Args:
function: describes what to run in the forward pass of the model or
part of the model. It should also know how to handle the inputs
passed as the tuple. For example, in LSTM, if user passes
``(activation, hidden)``, :attr:`function` should correctly use the
first input as ``activation`` and the second input as ``hidden``
preserve_rng_state(bool, optional): Omit stashing and restoring
the RNG state during each checkpoint.
Default: ``True``
policy_fn(Union[List[Op], callable]): policy for deciding what to
store (instead of recompute). If it's a function, it should
be of form (func, *args, **kwargs) -> bool which indicates
if func outputs with *args and **kwargs should be stored or not.
Additionally, a list[Op] is also supported for easier cases.
The op should be in the format `torch.ops.***`, where the `***`
names of operators can be obtained with `list_operators`.
*args: Arguments to pass in to the given ``function``.
**kwargs: Keyword arguments to pass into the given ``function``. | def checkpoint(
function, *args, preserve_rng_state=True, policy_fn=None, **kwargs
) -> Any:
"""Wrapper around torch.utils.checkpoint that accepts a custom policy
function for selectively deciding what to store and what to recompute
Args:
function: describes what to run in the forward pass of the model or
part of the model. It should also know how to handle the inputs
passed as the tuple. For example, in LSTM, if user passes
``(activation, hidden)``, :attr:`function` should correctly use the
first input as ``activation`` and the second input as ``hidden``
preserve_rng_state(bool, optional): Omit stashing and restoring
the RNG state during each checkpoint.
Default: ``True``
policy_fn(Union[List[Op], callable]): policy for deciding what to
store (instead of recompute). If it's a function, it should
be of form (func, *args, **kwargs) -> bool which indicates
if func outputs with *args and **kwargs should be stored or not.
Additionally, a list[Op] is also supported for easier cases.
The op should be in the format `torch.ops.***`, where the `***`
names of operators can be obtained with `list_operators`.
*args: Arguments to pass in to the given ``function``.
**kwargs: Keyword arguments to pass into the given ``function``.
"""
return torch.utils.checkpoint.checkpoint(
function,
*args,
use_reentrant=False,
preserve_rng_state=preserve_rng_state,
context_fn=functools.partial(selective_checkpoint_context_fn, policy_fn),
**kwargs,
) |
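A minimal sketch of the wrapper above with a list-based policy: outputs of the listed ops are stored, everything else is recomputed in the backward. The op names below are assumptions for a Linear/GELU stack (check them with `list_operators`), and a PyTorch version whose torch.utils.checkpoint supports `context_fn` is required:
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(64, 64), nn.GELU(), nn.Linear(64, 64))
x = torch.randn(4, 64, requires_grad=True)

# store matmul-like outputs, recompute the rest
policy = [torch.ops.aten.addmm.default, torch.ops.aten.mm.default]
y = checkpoint(model, x, policy_fn=policy)
y.sum().backward()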
Use ProfileOperatorsTorchDispatchMode to get runtime and memory info.
Args:
function: The function to optimize which will be selectively checkpointed. Usually the forward pass
of the model.
*args: Arguments to pass in to the given ``function``.
Returns:
A list of tuples, where each tuple contains the name of the operator, the runtime of the operator,
and the memory usage of the operator. | def _analyze_operators(function, *args) -> List[ProfileMetadata]:
"""
Use ProfileOperatorsTorchDispatchMode to get runtime and memory info.
Args:
function: The function to optimize which will be selectively checkpointed. Usually the forward pass
of the model.
*args: Arguments to pass in to the given ``function``.
Returns:
A list of tuples, where each tuple contains the name of the operator, the runtime of the operator,
and the memory usage of the operator.
"""
profile_ops = ProfileOperatorsTorchDispatchMode()
with profile_ops:
function(*args)
data = profile_ops.data
return data |
Given a function, its arguments, and the maximum amount of memory available,
find the subset of operators that can be optimized to reduce runtime while still fitting within the memory budget.
Args:
function: The function to optimize which will be selectively checkpointed. Usually the forward pass
of the model.
*args: Arguments to pass in to the given ``function``.
memory_budget (float): A float between 0 and 1 which describes what fraction of the total memory to use.
Returns:
A callable policy which can be passed to xformers.checkpoint()
Raises:
RuntimeError: If `scipy` is not available.
ValueError: If `memory_budget` is not a float between 0 and 1. | def get_optimal_checkpoint_policy(function, *args, memory_budget: float) -> Callable:
"""
Given a function, its arguments, and the maximum amount of memory available,
find the subset of operators that can be optimized to reduce runtime while still fitting within the memory budget.
Args:
function: The function to optimize which will be selectively checkpointed. Usually the forward pass
of the model.
*args: Arguments to pass in to the given ``function``.
memory_budget (float): A float between 0 and 1 which describes what fraction of the total memory to use.
Returns:
A callable policy which can be passed to xformers.checkpoint()
Raises:
RuntimeError: If `scipy` is not available.
ValueError: If `memory_budget` is not a float between 0 and 1.
"""
if not _scipy_is_available:
raise RuntimeError(
"Please install scipy 1.9.0+ to use `get_optimal_checkpoint_policy`. You can do so using "
"`pip install scipy`."
)
if memory_budget < 0 or memory_budget > 1:
raise ValueError(
f"`memory_budget` must be a float between 0 and 1. Got {memory_budget}."
)
data = _analyze_operators(function, *args)
# remove aten.detach.default from the list of ops because autograd
# inserts those during backward and it breaks the fwd-bwd alignment
data = [x for x in data if x.name not in OPS_TO_ALWAYS_SKIP]
ops, runtimes_, memory_, new_ids, _, inplace_ops_, view_like_ops_, rand_ops_ = zip(
*[astuple(x) for x in data]
)
runtimes = torch.tensor(runtimes_, dtype=torch.float64)
memory = torch.tensor(memory_, dtype=torch.float64)
view_like_ops = [i for i, x in enumerate(view_like_ops_) if x]
rand_ops = [i for i, x in enumerate(rand_ops_) if x]
# remap the inplace indices as we have removed OPS_TO_ALWAYS_SKIP
inplace_ops = [tuple(map(new_ids.index, x)) for x in inplace_ops_ if x]
# the last operation is always stored as the output of the checkpoint
# block, so we can avoid recomputing it. We set the memory to zero
# instead of adding a new constraint because we want both the 0 and 1
# endpoints for memory_budget to be valid
# FIXME: this heuristic for finding the last non-view non-inplace op
# might not always be correct, which would yield suboptimal policies
last_op = len(ops) - 1
skip_ops_ = set(view_like_ops) | set([x[0] for x in inplace_ops])
skip_ops = sorted(list(skip_ops_))
for op in reversed(skip_ops):
if op == last_op:
last_op -= 1
memory[last_op] = 0
max_memory = memory_budget * memory.sum().item()
# workaround to fix https://github.com/pytorch/pytorch/issues/121212
force_store_random = all([not isinstance(x, torch.Tensor) for x in args])
optim_output = _optimize_runtime_with_given_memory(
memory=memory,
runtimes=runtimes,
max_memory=max_memory,
view_like_ops=view_like_ops,
inplace_ops=inplace_ops,
random_ops=rand_ops,
force_store_random=force_store_random,
)
return _OptimalPolicy(optim_output=optim_output) |
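Sketch of deriving a policy under a 50% memory budget and feeding it to `checkpoint` above; scipy >= 1.9 and a CUDA device for the profiling pass are assumed, and the module is a stand-in:
import torch
import torch.nn as nn

block = nn.Sequential(nn.Linear(128, 512), nn.GELU(), nn.Linear(512, 128)).cuda()
x = torch.randn(16, 128, device="cuda", requires_grad=True)

policy = get_optimal_checkpoint_policy(block, x, memory_budget=0.5)
out = checkpoint(block, x, policy_fn=policy)
out.sum().backward()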
Given a list of operator names, their corresponding runtimes, and the maximum amount of memory available,
find the subset of operators that can be optimized to reduce runtime while still fitting within the memory budget.
Uses https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.milp.html
Args:
memory (torch.Tensor): Tensor containing the memory usage of each operator.
runtimes (torch.Tensor): Tensor containing the runtime of each operator.
max_memory (float): Maximum amount of memory to use.
view_like_ops (List[int]): Indices of the view-like ops.
inplace_ops (List[Tuple[int, int]]): Tuple with the pair of inplace op -> parent of inplace op.
This will be used to add the constraint that in-place ops need to either be
stored in memory with the previous op, or recomputed with the previous op.
random_ops (List[int]): Indices of the random ops, which are recomputed unless force_store_random is set.
force_store_random (bool): force random ops to always be stored (instead of recomputed) | def _optimize_runtime_with_given_memory(
memory: torch.Tensor,
runtimes: torch.Tensor,
max_memory: float,
view_like_ops: List[int],
inplace_ops: List[Tuple[int, ...]],
random_ops: List[int],
force_store_random: bool,
) -> torch.Tensor:
"""
Given a list of operator names, their corresponding runtimes, and the maximum amount of memory available,
find the subset of operators that can be optimized to reduce runtime while still fitting within the memory budget.
Uses https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.milp.html
Args:
memory (torch.Tensor): Tensor containing the memory usage of each operator.
runtimes (torch.Tensor): Tensor containing the runtime of each operator.
max_memory (float): Maximum amount of memory to use.
view_like_ops (List[int]): Indices of the view-like ops.
inplace_ops (List[Tuple[int, int]]): Tuple with the pair of inplace op -> parent of inplace op.
This will be used to add the constraint that in-place ops need to either be
stored in memory with the previous op, or recomputed with the previous op.
random_ops (List[int]): Indices of the random ops, which are recomputed unless force_store_random is set.
force_store_random (bool): force random ops to always be stored (instead of recomputed)
"""
c = -runtimes # type: ignore[operator]
memory_constraint = LinearConstraint(A=memory, ub=max_memory)
constraints = [memory_constraint]
# view-like ops should always be recomputed
for i in view_like_ops:
A = torch.zeros_like(c)
A[i] = 1
constraints.append(LinearConstraint(A=A, lb=0, ub=0))
# inplace ops should always be handled in conjunction with their parent op
# i.e., if we recompute the parent op the inplace should also be
# recomputed, and vice versa
for op, op_parent in inplace_ops:
A = torch.zeros_like(c)
if op != op_parent:
A[op_parent] = 1
A[op] = -1
constraints.append(LinearConstraint(A=A, lb=0, ub=0))
else:
# if op == op_parent, it's because it's the first op
# that is inplace. Thus never recompute it
A[op] = 1
constraints.append(LinearConstraint(A=A, lb=1, ub=1))
# ideally, always recompute random ops
# in practice, due to a bug in https://github.com/pytorch/pytorch/issues/121212
# sometimes we need to store them to avoid correctness issues
for i in random_ops:
A = torch.zeros_like(c)
A[i] = 1
val = int(force_store_random)
constraints.append(LinearConstraint(A=A, lb=val, ub=val))
integrality = torch.ones_like(c)
res = milp(
c=c, constraints=constraints, integrality=integrality, bounds=Bounds(0, 1)
)
if not res.success:
raise ValueError(
"The problem is infeasible, and probably due to a change in xformers "
"that makes random ops always be stored. Try passing a larger memory_budget. "
"This will be fixed once https://github.com/pytorch/pytorch/issues/121212 "
"is solved"
)
x = torch.from_numpy(res.x)
return x |
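A toy instance of the MILP above (scipy assumed installed): three ops with equal memory cost and a budget that fits only one, so the solver should choose to store the op with the largest runtime:
import torch

memory = torch.tensor([1.0, 1.0, 1.0], dtype=torch.float64)
runtimes = torch.tensor([0.1, 5.0, 0.2], dtype=torch.float64)
x = _optimize_runtime_with_given_memory(
    memory=memory,
    runtimes=runtimes,
    max_memory=1.0,
    view_like_ops=[],
    inplace_ops=[],
    random_ops=[],
    force_store_random=False,
)
print(x)  # expected ~ tensor([0., 1., 0.]): only the most expensive op is stored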
Wrap a module with selective activation checkpointing.
It behaves similarly to PyTorch's checkpoint_wrapper, but lets the user
either specify a handcrafted policy_fn, or let an optimization
algorithm select the policy given a user-specified memory_budget.
The user should either specify the memory_budget argument or the policy_fn.
The memory_budget is a float value between 0 (recompute everything in the backward) and 1
(store everything for the backward). Using a value of 0 should be similar to PyTorch's
activation checkpoint, while 1 should be similar to the behavior of not using any
activation checkpointing. | def selective_checkpoint_wrapper(
module: torch.nn.Module,
memory_budget: Optional[float] = None,
policy_fn: Optional[Callable] = None,
):
"""
Wrap a module with selective activation checkpointing.
It behaves similarly to PyTorch's checkpoint_wrapper, but lets the user
either specify a handcrafted policy_fn, or let an optimization
algorithm select the policy given a user-specified memory_budget.
The user should either specify the memory_budget argument or the policy_fn.
The memory_budget is a float value between 0 (recompute everything in the backward) and 1
(store everything for the backward). Using a value of 0 should be similar to PyTorch's
activation checkpoint, while 1 should be similar to the behavior of not using any
activation checkpointing.
"""
return SelectiveCheckpointWrapper(module, memory_budget, policy_fn) |
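Usage sketch for the wrapper above, assuming `SelectiveCheckpointWrapper` resolves the policy from memory_budget when the module is first run (scipy and a CUDA device are assumed for the profiling-based path):
import torch
import torch.nn as nn

mlp = nn.Sequential(nn.Linear(256, 1024), nn.GELU(), nn.Linear(1024, 256)).cuda()
mlp = selective_checkpoint_wrapper(mlp, memory_budget=0.3)

x = torch.randn(8, 256, device="cuda", requires_grad=True)
mlp(x).sum().backward()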
Given a superset of the inputs and a reference config class,
return exactly the needed config | def generate_matching_config(superset: Dict[str, Any], config_class: Any) -> Any:
"""Given a superset of the inputs and a reference config class,
return exactly the needed config"""
# Extract the required fields
field_names = list(map(lambda x: x.name, fields(config_class)))
subset = {k: v for k, v in superset.items() if k in field_names}
# The missing fields are set to None
for k in field_names:
if k not in subset.keys():
subset[k] = None
return config_class(**subset) |
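A toy illustration of `generate_matching_config` with a hypothetical dataclass standing in for a real config class:
from dataclasses import dataclass
from typing import Optional

@dataclass
class DummyConfig:
    name: str
    dropout: Optional[float] = None

cfg = generate_matching_config({"name": "my_attention", "dropout": 0.1, "unused": 42}, DummyConfig)
# -> DummyConfig(name='my_attention', dropout=0.1): 'unused' is dropped,
#    and any field missing from the superset would have been set to None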
Print out the contents of a dict as a human-readable, Markdown-compatible table | def pretty_print(results, title, units) -> None:
"""Print out the contents of a dict as a human-readable, Markdown-compatible table"""
print(title)
header = " Units: {:<45}".format(units)
print("| " + header + "|" + "".join("{0:<20}|".format(k) for k in results.keys()))
offset = len(header)
print(
"|-{}|".format("-" * offset)
+ "".join("{}|".format("-" * 20) for _ in results.keys())
)
workloads: Dict[str, Any] = {k: [] for v in results.values() for k in v.keys()}
for v in results.values():
for k in v.keys():
workloads[k].append(v[k])
for k, w in workloads.items():
print(
"| {0:<{offset}}|".format(k, offset=offset)
+ "".join("{:<20}|".format(v) for v in w)
)
print("") |
Graph out the contents of a dict.
If a result label contains dash_key, the corresponding curve is drawn with a dashed line | def pretty_plot(
results, title, units: str, filename=None, dash_key="", legend_loc="lower right"
):
"""Graph out the contents of a dict.
Dash key means that if the result label has this key, then it will be displayed with a dash
"""
if not filename:
filename = title + ".png"
# Sanitize the filename
filename = (
filename.replace(" ", "_").replace("/", "_").replace("-", "_").replace(":", "")
)
# Gather all the results in "collumns"
workloads: Dict[str, Any] = {k: [] for v in results.values() for k in v.keys()}
for v in results.values():
for k in v.keys():
workloads[k].append(float(v[k]))
# Make sure that the plot is big enough
f = plt.figure()
f.set_figwidth(6)
f.set_figheight(6)
# Display the collections
for k, v in workloads.items():
if dash_key and dash_key in k:
plt.plot(list(results.keys()), v, "--")
else:
plt.plot(list(results.keys()), v)
plt.title(title)
plt.legend(list(workloads.keys()), loc=legend_loc)
plt.ylabel(units)
plt.xticks(rotation=45)
plt.savefig(filename, bbox_inches="tight")
plt.close(f) |
Graph out the contents of a dict.
If a result label contains dash_key, it is displayed with a dashed style | def pretty_barplot(results, title, units: str, filename=None, dash_key=""):
"""Graph out the contents of a dict.
Dash key means that if the result label has this key, then it will be displayed with a dash
"""
if not filename:
filename = title + ".png"
# Sanitize the filename
filename = (
filename.replace(" ", "_").replace("/", "_").replace("-", "_").replace(":", "")
)
xlabels = list(results.keys())
# Gather all the results in "collumns"
workloads: Dict[str, Any] = {k: [] for v in results.values() for k in v.keys()}
for v in results.values():
for k in v.keys():
workloads[k].append(float(v[k]))
options = list(workloads.keys())
group_len = len(options)
for key in workloads.keys():
num_groups = len(workloads[key])
break
group_width = group_len + 1
# Make sure that the plot is big enough
f = plt.figure()
f.set_figwidth(6)
f.set_figheight(6)
for idx in range(group_len):
option = options[idx]
values = workloads[option]
xloc = np.arange(1 + idx, group_width * num_groups, group_width)
plt.bar(xloc, values, width=1, edgecolor="black")
plt.title(title)
plt.legend(list(workloads.keys()), loc="upper right")
plt.ylabel(units)
ax = plt.gca()
xticks_loc = np.arange(
1 + (group_len - 1) / 2.0, group_width * num_groups, group_width
)
ax.set_xticks(xticks_loc, xlabels)
plt.xticks(rotation=45)
plt.setp(ax.xaxis.get_majorticklabels(), ha="right")
ax.set_axisbelow(True)
ax.yaxis.grid(color="gray", linestyle="dashed")
ax.xaxis.grid(color="gray", linestyle="dashed")
plt.savefig(filename, bbox_inches="tight")
plt.close(f) |
Remove a file like rm -f. | def rmf(filename: str) -> None:
"""Remove a file like rm -f."""
try:
os.remove(filename)
except FileNotFoundError:
pass |
A context to get tempfiles and ensure they are cleaned up. | def temp_files_ctx(num: int) -> Generator:
"""A context to get tempfiles and ensure they are cleaned up."""
files = [tempfile.mkstemp()[1] for _ in range(num)]
yield tuple(files)
# temp files could have been removed, so we use rmf.
for name in files:
rmf(name) |
Returns the list of results to feed to `benchmark.Compare`; if we have runs
with different algorithms, we also add the algorithm name
in the column titles | def _finalize_results(results: List[Tuple[Dict[str, Any], Any]]) -> List[Any]:
"""
Returns the list of results to feed to `benchmark.Compare`; if we have runs
with different algorithms, we also add the algorithm name
in the column titles
"""
all_algorithms: Set[str] = set()
all_description: Set[str] = set()
for metadata, r in results:
algo = metadata.get(META_ALGORITHM, None)
if algo is not None:
all_algorithms.add(algo)
all_description.add(r.task_spec.description)
display_algo = len(all_algorithms) > 1
display_descr = len(all_description) > 1
display_results = []
for metadata, r in results:
algo = metadata.get(META_ALGORITHM, None)
if algo is None:
display_results.append(r)
else:
r = copy.copy(r)
description = ""
if display_descr:
description = r.task_spec.description
if display_algo:
if display_descr:
description += "["
description += algo
if display_descr:
description += "]"
r.task_spec = replace(r.task_spec, description=description)
display_results.append(r)
return display_results |
Create CLI argument parser. | def create_argparser() -> argparse.ArgumentParser:
"""
Create CLI argument parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--fn", default=None, type=str, help="Only benchmark this function"
)
parser.add_argument(
"--label", default=None, type=str, help="Store results to a file"
)
parser.add_argument(
"--fail_if_regression",
action="store_true",
help="Enabled in CI to check against performance regressions",
)
parser.add_argument(
"--compare",
default=None,
type=str,
help="Compare to previously stored benchmarks (coma separated)",
)
parser.add_argument(
"--omit-baselines",
action="store_true",
help="Do not run the (potentially slow) baselines",
)
parser.add_argument(
"--quiet",
action="store_true",
help="Skip intermediate results and progress bar",
)
return parser |
Helper function to run benchmarks.
Supports loading previous results for comparison, and saving current results to file. | def benchmark_main_helper(
benchmark_fn, cases: List[Dict[str, Any]], arg_parser=None, **kwargs
) -> None:
"""
Helper function to run benchmarks.
Supports loading previous results for comparison, and saving current results to file.
"""
arg_parser = arg_parser or create_argparser()
args = arg_parser.parse_args()
if args.fn is not None and args.fn != get_func_name(benchmark_fn):
print(f'Skipping benchmark "{get_func_name(benchmark_fn)}"')
return
benchmark_run_and_compare(
benchmark_fn=benchmark_fn,
cases=cases,
optimized_label="optimized" if args.label is None else args.label,
fail_if_regression=args.fail_if_regression,
compare=args.compare.split(",") if args.compare is not None else [],
quiet=args.quiet,
omit_baselines=args.omit_baselines,
**kwargs,
) |
Yield all combinations of parameters in the grid (as a dict) | def grid_parameters(grid: Dict):
"""
Yield all combinations of parameters in the grid (as a dict)
"""
grid_copy = dict(grid)
# Turn single value in an Iterable
for k in grid_copy:
if not isinstance(grid_copy[k], Iterable):
grid_copy[k] = [grid_copy[k]]
for p in itertools.product(*grid_copy.values()):
yield dict(zip(grid.keys(), p)) |
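Example of expanding a grid with `grid_parameters`; scalar values are wrapped automatically, so `batch` below acts as a single-element axis:
import torch

grid = {"dtype": [torch.float16, torch.float32], "causal": [False, True], "batch": 8}
cases = list(grid_parameters(grid))
# 2 x 2 x 1 = 4 combinations, e.g. {'dtype': torch.float16, 'causal': False, 'batch': 8}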
See DeepNet_.
Returns alpha and beta depending on the number of encoder and decoder layers,
first tuple is for the encoder and second for the decoder
.. _DeepNet: https://arxiv.org/pdf/2203.00555v1.pdf | def get_deepnorm_coefficients(
encoder_layers: int, decoder_layers: int
) -> Tuple[Optional[DeepNormCoefficients], Optional[DeepNormCoefficients]]:
"""
See DeepNet_.
Returns alpha and beta depending on the number of encoder and decoder layers,
first tuple is for the encoder and second for the decoder
.. _DeepNet: https://arxiv.org/pdf/2203.00555v1.pdf
"""
N = encoder_layers
M = decoder_layers
if decoder_layers == 0:
# Encoder only
return (
DeepNormCoefficients(alpha=(2 * N) ** 0.25, beta=(8 * N) ** -0.25),
None,
)
elif encoder_layers == 0:
# Decoder only
return None, DeepNormCoefficients(alpha=(2 * M) ** 0.25, beta=(8 * M) ** -0.25)
else:
# Encoder/decoder
encoder_coeffs = DeepNormCoefficients(
alpha=0.81 * ((N**4) * M) ** 0.0625, beta=0.87 * ((N**4) * M) ** -0.0625
)
decoder_coeffs = DeepNormCoefficients(
alpha=(3 * M) ** 0.25, beta=(12 * M) ** -0.25
)
return (encoder_coeffs, decoder_coeffs) |
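A quick numeric check of the formulas above for an encoder-only stack of N = 16 layers:
enc, dec = get_deepnorm_coefficients(encoder_layers=16, decoder_layers=0)
assert dec is None
print(enc.alpha, enc.beta)  # (2 * 16) ** 0.25 ~= 2.378, (8 * 16) ** -0.25 ~= 0.297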
Builds a multihead attention from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_attention",
"foo": "bar"}` will find a class that was registered as "my_attention"
(see :func:`register_attention`) and call .from_config on it. | def build_multi_head_attention(
multi_head_config: Union[MultiHeadDispatchConfig, Dict[str, Any]],
):
"""Builds a multihead attention from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_attention",
"foo": "bar"}` will find a class that was registered as "my_attention"
(see :func:`register_attention`) and call .from_config on it."""
if not isinstance(multi_head_config, MultiHeadDispatchConfig):
# Extract the required fields
field_names = list(map(lambda x: x.name, fields(MultiHeadDispatchConfig)))
# The missing fields are set to None
for k in field_names:
if k not in multi_head_config.keys():
multi_head_config[k] = None
# Could be that the attention needs to be instantiated
if not isinstance(multi_head_config["attention"], Attention):
# Convenience: fill in possible missing fields
if "num_heads" not in multi_head_config["attention"]:
multi_head_config["attention"]["num_heads"] = multi_head_config[
"num_heads"
]
if "dim_model" not in multi_head_config["attention"]:
multi_head_config["attention"]["dim_model"] = multi_head_config[
"dim_model"
]
if (
"dim_features" not in multi_head_config["attention"]
or multi_head_config["attention"]["dim_features"] is None
):
multi_head_config["attention"]["dim_features"] = (
multi_head_config["dim_model"] // multi_head_config["num_heads"]
)
multi_head_config["attention"] = build_attention(
multi_head_config["attention"]
)
multi_head_config = MultiHeadDispatchConfig(**multi_head_config)
return MultiHeadDispatch.from_config(multi_head_config) |
Returns a 2d pattern that samples 1 out of every k elements in the attention mask.
Can be seen as a form of downsampling, where every pixel attends to a downsampled
version of the input. | def dilated_2d_pattern(H, W, k=2):
"""
Returns a 2d pattern that samples 1 out of every k elements in the attention mask.
Can be seen as a form of downsampling, where every pixel attends to a downsampled
version of the input.
"""
d_h = local_nd_distance(H, W, p=1, weights=(1, 0))
d_w = local_nd_distance(H, W, p=1, weights=(0, 1))
d = (d_h.floor() % k == 0) & (d_w.floor() % k == 0)
return d |
Block sparsify a tensor, given a mask and block size | def block_sparsify_tensor(x, mask, block_size):
"""
Block sparsify a tensor, given a mask and block size
"""
ret = torch.empty(
(x.size(0), mask.sum(), block_size, block_size), dtype=x.dtype, device=x.device
)
for idx, (h, i, j) in enumerate(zip(*mask.nonzero(as_tuple=True))):
ret[:, idx, :, :] = x[
:,
h,
i * block_size : (i + 1) * block_size,
j * block_size : (j + 1) * block_size,
]
return ret |
Given a mask pattern and blocksize, return the corresponding layout
which makes sure that all the positives in the mask are covered | def pattern_to_layout(mask: torch.Tensor, block_size: int) -> torch.Tensor:
r"""
Given a mask pattern and blocksize, return the corresponding layout
which makes sure that all the positives in the mask are covered
"""
assert mask.ndim >= 2, "We're expecting [Heads, Seq, Seq] or [Seq, Seq]"
_should_squeeze = False
if mask.ndim == 2:
mask = mask.unsqueeze(0)
_should_squeeze = True
assert (
mask.shape[1] % block_size == 0 and mask.shape[2] % block_size == 0
), "We're only handling masks divisible by block_size"
# Now mark the mask
layout = torch.nn.functional.max_pool2d(
mask.to(torch.float), kernel_size=block_size, stride=block_size
)
layout = layout.to(torch.long)
if _should_squeeze:
layout.squeeze_(0)
return layout |
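Example for `pattern_to_layout`: an 8x8 causal pattern with block_size=4 collapses to a 2x2 layout, any block containing at least one True becoming 1:
import torch

mask = torch.tril(torch.ones(8, 8, dtype=torch.bool))
layout = pattern_to_layout(mask, block_size=4)
# tensor([[1, 0],
#         [1, 1]])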
Use the additive bias computation from ALiBi_ to generate a mask.
Note that this mask can in turn be used to generate a blocksparse attention computation layout
.. note: mask_shape is expected to hold the [heads, seq, seq] dimensions
.. _ALiBi: https://arxiv.org/pdf/2108.12409.pdf | def alibi_pattern(threshold: float, mask_shape: torch.Size) -> torch.Tensor:
r"""
Use the additive bias computation from ALiBi_ to generate a mask.
Note that this mask can in turn be used to generate a blocksparse attention computation layout
.. note: mask_shape is expected to hold the [heads, seq, seq] dimensions
.. _ALiBi: https://arxiv.org/pdf/2108.12409.pdf
"""
# CREDITS: code snippet from Ofir Press, one of the authors
def get_slopes(n: int):
def get_slopes_power_of_2(n: int) -> List[float]:
start = 2 ** (-(2 ** -(math.log2(n) - 3)))
ratio = start
return [start * ratio**i for i in range(n)]
# In the paper, we only train models that have 2^a heads for some a. This function has
# some good properties that only occur when the input is a power of 2. To maintain that even
# when the number of heads is not a power of 2, we use this workaround.
if math.log2(n).is_integer():
return get_slopes_power_of_2(n)
else:
closest_power_of_2 = 2 ** math.floor(math.log2(n))
return (
get_slopes_power_of_2(closest_power_of_2)
+ get_slopes(2 * closest_power_of_2)[0::2][: n - closest_power_of_2]
)
maxpos = mask_shape[1]
attn_heads = mask_shape[0]
slopes = torch.Tensor(get_slopes(attn_heads))
# In the next line, the part after the * is what constructs the diagonal matrix
# (right matrix in Figure 3 in the paper).
# If you run it you'll see that it doesn't exactly print out the same matrix as we have in Figure 3,
# but one where all rows are identical.
# This works because the softmax operation is invariant to translation,
# and our bias functions are always linear.
alibi = slopes.unsqueeze(1).unsqueeze(1) * torch.arange(maxpos).unsqueeze(
0
).unsqueeze(0).expand(attn_heads, -1, -1)
alibi = alibi.view(attn_heads, 1, maxpos)
# Now threshold arbitrarily, report the mask
return alibi < threshold |
create a pattern of shape [heads, seq, seq] out of a blocksparse
layout of shape [heads, seq/block_size, seq/block_size] | def layout_to_pattern(layout: torch.Tensor, block_size: int):
r"""
create a pattern of shape [heads, seq, seq] out of a blocksparse
layout of shape [heads, seq/block_size, seq/block_size]
"""
return torch.kron(layout, torch.ones(block_size, block_size)) |
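The reverse direction of `pattern_to_layout` above: each layout entry is expanded into a block_size x block_size tile:
import torch

layout = torch.tensor([[1, 0], [1, 1]])
pattern = layout_to_pattern(layout, block_size=2)
# tensor([[1., 1., 0., 0.],
#         [1., 1., 0., 0.],
#         [1., 1., 1., 1.],
#         [1., 1., 1., 1.]])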
Computing the Moore-Penrose inverse.
Use an iterative method from (Razavi et al. 2014) to approximate the Moore-Penrose inverse via efficient
matrix-matrix multiplications. | def iterative_pinv(softmax_mat: torch.Tensor, n_iter=6, pinverse_original_init=False):
"""
Computing the Moore-Penrose inverse.
Use an iterative method from (Razavi et al. 2014) to approximate the Moore-Penrose inverse via efficient
matrix-matrix multiplications.
"""
i = torch.eye(
softmax_mat.size(-1), device=softmax_mat.device, dtype=softmax_mat.dtype
)
k = softmax_mat
# The entries of K are positive and ||K||_{\infty} = 1 due to softmax
if pinverse_original_init:
# This original implementation is more conservative to compute coefficient of Z_0.
v = 1 / torch.max(torch.sum(k, dim=-2)) * k.transpose(-1, -2)
else:
# This is the exact coefficient computation, 1 / ||K||_1, of initialization of Z_0, leading to faster
# convergence.
v = (
1
/ torch.max(torch.sum(k, dim=-2), dim=-1).values[:, None, None]
* k.transpose(-1, -2)
)
for _ in range(n_iter):
kv = torch.matmul(k, v)
v = torch.matmul(
0.25 * v,
13 * i - torch.matmul(kv, 15 * i - torch.matmul(kv, 7 * i - kv)),
)
return v |
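A sanity-check sketch for `iterative_pinv`: for a batch of row-softmaxed matrices, K @ pinv(K) @ K should approach K as n_iter grows (how fast depends on conditioning):
import torch

k = torch.softmax(torch.randn(2, 16, 16, dtype=torch.float64), dim=-1)
k_pinv = iterative_pinv(k, n_iter=10)
print((k @ k_pinv @ k - k).abs().max())  # shrinks as n_iter increases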
Builds an attention from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_attention",
"foo": "bar"}` will find a class that was registered as "my_attention"
(see :func:`register_attention`) and call .from_config on it. | def build_attention(config: Union[Dict[str, Any], AttentionConfig]):
"""Builds an attention from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_attention",
"foo": "bar"}` will find a class that was registered as "my_attention"
(see :func:`register_attention`) and call .from_config on it."""
if not isinstance(config, AttentionConfig):
try:
config_instance = generate_matching_config(
config, ATTENTION_REGISTRY[config["name"]].config
)
except KeyError as e:
name = config["name"]
logger.warning(f"{name} not available among {ATTENTION_REGISTRY.keys()}")
raise e
else:
config_instance = config
return ATTENTION_REGISTRY[config_instance.name].constructor.from_config(
config_instance
) |
Builds a feedforward from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_feedforward",
"foo": "bar"}` will find a class that was registered as "my_feedforward"
(see :func:`register_feedforward`) and call .from_config on it. | def build_feedforward(config: Union[Dict[str, Any], FeedforwardConfig]):
"""Builds a feedforward from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_feedforward",
"foo": "bar"}` will find a class that was registered as "my_feedforward"
(see :func:`register_feedforward`) and call .from_config on it."""
if not isinstance(config, FeedforwardConfig):
config_instance = generate_matching_config(
config, FEEDFORWARD_REGISTRY[config["name"]].config
)
else:
config_instance = config
return FEEDFORWARD_REGISTRY[config_instance.name].constructor.from_config(
config_instance
) |
Builds a position encoding from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_position_encoding",
"foo": "bar"}` will find a class that was registered as "my_position_encoding"
(see :func:`register_positional_embedding`) and call .from_config on it. | def build_positional_embedding(config: Union[Dict[str, Any], PositionEmbeddingConfig]):
"""Builds a position encoding from a config.
This assumes a 'name' key in the config which is used to determine what
attention class to instantiate. For instance, a config `{"name": "my_position_encoding",
"foo": "bar"}` will find a class that was registered as "my_position_encoding"
(see :func:`register_positional_embedding`) and call .from_config on it."""
if not isinstance(config, PositionEmbeddingConfig):
config_instance = generate_matching_config(
config, POSITION_EMBEDDING_REGISTRY[config["name"]].config
)
else:
config_instance = config
return POSITION_EMBEDDING_REGISTRY[config_instance.name].constructor.from_config(
config_instance
) |
Handle all the supported residual path configurations.
..Note: we return the appropriate constructor, not an actual layer | def _get_ln_factory(
d_model: int,
residual_norm_style: Optional[ResidualNormStyle],
use_triton: bool,
residual: bool,
normalization: NormalizationType = NormalizationType.LayerNorm,
residual_scale: float = 1.0,
):
"""
Handle all the supported residual path configurations.
..Note: we return the appropriate constructor, not an actual layer
"""
def get_layer_wrapper(
d_model: int,
sublayer: nn.Module,
residual_norm_style: Optional[ResidualNormStyle],
residual: bool,
residual_scale: float,
):
if residual:
if residual_norm_style == ResidualNormStyle.Pre:
return Residual(
layer=PreNorm(d_model, sublayer, normalization, use_triton),
scale=None,
)
elif residual_norm_style == ResidualNormStyle.Post:
return PostNorm(
d_model,
Residual(layer=sublayer, scale=None),
normalization,
use_triton,
)
elif residual_norm_style == ResidualNormStyle.DeepNorm:
return PostNorm(
d_model,
Residual(layer=sublayer, scale=residual_scale),
normalization,
use_triton=use_triton,
)
else:
raise ValueError
return (
PreNorm(d_model, sublayer, normalization, use_triton)
if residual_norm_style == ResidualNormStyle.Pre
else PostNorm(d_model, sublayer, normalization, use_triton)
)
def ln_factory(sublayer: nn.Module):
return get_layer_wrapper(
d_model, sublayer, residual_norm_style, residual, residual_scale
)
return ln_factory |
Best effort - OmegaConf supports limited typing, so we may fail to import
certain config classes. For example, PyTorch types are not supported. | def import_xformer_config_schema():
"""
Best effort - OmegaConf supports limited typing, so we may fail to import
certain config classes. For example, PyTorch types are not supported.
"""
cs = ConfigStore.instance()
for k, v in {
"ff": FEEDFORWARD_REGISTRY,
"pe": POSITION_EMBEDDING_REGISTRY,
"attention": ATTENTION_REGISTRY,
}.items():
for kk in v.keys():
try:
cs.store(name=f"{kk}_schema", node=v[kk].config, group=f"xformers/{k}")
except ValidationError as e:
logger.debug(f"Error registering {kk}_schema, error: {e}") |
Provide the xFormers factory with weight init routines.
Supported initializations are:
- Small: follow the method outlined in `Transformer Without Tears`_
- ViT: follow the initialization in the reference ViT_ codebase
- Timm: follow the initialization in the reference Timm_ codebase
- Moco: follow the initialization in the reference MocoV3_ codebase
.. _ViT: https://github.com/google-research/vision_transformer
.. _Timm: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
.. _MocoV3: https://github.com/facebookresearch/moco-v3 | def get_weight_init_fn(init_choice: xFormerWeightInit):
"""
Provide the xFormers factory with weight init routines.
Supported initializations are:
- Small: follow the method outlined in `Transformer Without Tears`_
- ViT: follow the initialization in the reference ViT_ codebase
- Timm: follow the initialization in the reference Timm_ codebase
- Moco: follow the initialization in the reference MocoV3_ codebase
.. _ViT: https://github.com/google-research/vision_transformer
.. _Timm: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
.. _MocoV3: https://github.com/facebookresearch/moco-v3
"""
return {
xFormerWeightInit.Timm: _init_weights_vit_timm,
xFormerWeightInit.ViT: _init_weights_vit_jax,
xFormerWeightInit.Moco: _init_weights_vit_moco,
xFormerWeightInit.Small: _init_weights_small,
}[init_choice] |
Fills the input `Tensor` with values according to the method
described in `Transformer Without Tears`_, using a uniform distribution.
This is a variation of the Xavier init. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-a, a)` where
.. math::
a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + 4 * \text{fan\_out}}}
Also known as Glorot initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
gain: an optional scaling factor
.. _`Transformer Without Tears`: https://arxiv.org/abs/1910.05895 | def _small_init_(tensor: torch.Tensor, gain: float = 1.0) -> torch.Tensor:
r"""Fills the input `Tensor` with values according to the method
described in `Transformer Without Tears`_, using a uniform distribution.
This is a variation of the Xavier init. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-a, a)` where
.. math::
a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + 4 * \text{fan\_out}}}
Also known as Glorot initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
gain: an optional scaling factor
.. _`Transformer Without Tears`: https://arxiv.org/abs/1910.05895
"""
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + 4 * fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return _no_grad_uniform_(tensor, -a, a) |
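A quick numeric check of the bound above: for a 256 x 1024 weight (fan_in = 1024, fan_out = 256), a = sqrt(3) * sqrt(2 / (1024 + 4 * 256)) ~= 0.054, so all sampled values stay within that range:
import torch

w = torch.empty(256, 1024)
_small_init_(w, gain=1.0)
print(w.abs().max())  # <= ~0.054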
ViT weight initialization, matching JAX (Flax) impl | def _init_weights_vit_jax(
module: nn.Module,
name: str = "",
head_bias: float = 0.0,
gain: float = 1.0,
deepnorm_style: bool = False,
**kwargs,
):
"""ViT weight initialization, matching JAX (Flax) impl"""
if is_ffn(name):
_maybe_init_tensor(module, "bias", nn.init.normal_, std=1e-6)
_maybe_init_tensor(module, "weight", torch.nn.init.xavier_uniform_, gain=gain)
elif is_mha_input_projection(name) or isinstance(module, nn.Linear):
if deepnorm_style and (
"q_proj" in name.split(".") or "k_proj" in name.split(".")
):
gain = 1.0
_maybe_init_tensor(module, "weight", torch.nn.init.xavier_uniform_, gain=gain)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif isinstance(module, nn.Conv2d):
_maybe_init_tensor(module, "weight", _lecun_normal, gain=gain)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif hasattr(module, "init_weights"):
module.init_weights() # type: ignore
else:
_maybe_report_no_init(module, name)
# Recurse over the children, if the weight init is being handled here
if not hasattr(module, "init_weights"):
for child_name, child_module in module.named_children():
_init_weights_vit_jax(child_module, f"{name}.{child_name}", head_bias, gain) |
ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed | def _init_weights_vit_moco(
module: nn.Module,
name: str = "",
gain: float = 1.0,
**kwargs,
):
"""ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed"""
assert (
"deepnorm_style" not in kwargs.keys()
), "This initialization method does not support deepnorm"
if is_ffn(name):
_maybe_init_tensor(module, "weight", torch.nn.init.xavier_uniform_, gain=gain)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif is_mha_input_projection(name) or isinstance(module, nn.Linear):
if isinstance(module.weight, torch.Tensor):
val = (
math.sqrt(6.0 / float(module.weight.shape[0] + module.weight.shape[1]))
* gain
)
_maybe_init_tensor(module, "weight", nn.init.uniform_, a=-val, b=val)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif hasattr(module, "init_weights"):
module.init_weights(gain=gain) # type: ignore
else:
_maybe_report_no_init(module, name)
# Recurse over the children, if the weight init is being handled here
if not hasattr(module, "init_weights"):
for child_name, child_module in module.named_children():
_init_weights_vit_moco(child_module, child_name, gain) |
Follow the `Transformer Without Tears`_ initialization for self-attention | def _init_weights_small(
module: nn.Module,
name: str = "",
head_bias: float = 0.0,
gain: float = 1.0,
deepnorm_style: bool = False,
**kwargs,
):
"""Follow the `Transformer Without Tears`_ initialization for self-attention"""
if is_ffn(name):
_maybe_init_tensor(module, "weight", torch.nn.init.xavier_uniform_, gain=gain)
_maybe_init_tensor(module, "bias", nn.init.normal_, std=1e-6)
elif is_mha_input_projection(name) or isinstance(module, nn.Linear):
# "small init" only scales the attention layers init, not the FFN
if deepnorm_style and (
"q_proj" in name.split(".") or "k_proj" in name.split(".")
):
gain = 1.0
_maybe_init_tensor(module, "weight", _small_init_, gain=gain)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif isinstance(module, nn.Conv2d):
_maybe_init_tensor(module, "weight", _lecun_normal)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif hasattr(module, "init_weights"):
module.init_weights() # type: ignore
else:
_maybe_report_no_init(module, name)
# Recurse over the children, if the weight init is being handled here
if not hasattr(module, "init_weights"):
for child_name, child_module in module.named_children():
_init_weights_small(child_module, f"{name}.{child_name}", head_bias, gain) |
ViT weight initialization, original timm impl (for reproducibility).
See DeepNet_ for all the DeepNorm specific codepaths | def _init_weights_vit_timm(
module: nn.Module,
name: str = "",
gain: float = 1.0,
deepnorm_style: bool = False,
**kwargs,
):
"""
ViT weight initialization, original timm impl (for reproducibility).
See DeepNet_ for all the DeepNorm specific codepaths
"""
if isinstance(module, nn.Linear):
if deepnorm_style and (
"q_proj" in name.split(".") or "k_proj" in name.split(".")
):
gain = 1
std = 0.02 * gain
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
_maybe_init_tensor(
module, "weight", _no_grad_trunc_normal_, mean=0.0, std=std, a=-a, b=a
)
_maybe_init_tensor(module, "bias", nn.init.zeros_)
elif hasattr(module, "init_weights"):
module.init_weights(gain=gain) # type: ignore
else:
_maybe_report_no_init(module, name)
# Recurse over the children, if the weight init is being handled here
if not hasattr(module, "init_weights"):
for child_name, child_module in module.named_children():
_init_weights_vit_timm(child_module, child_name, gain) |
A small helper to generate hierarchical xformers configurations,
which correspond for instance to poolformer or swin architectures.
Contrary to more "classical" Transformer architectures, which conserve the sequence/context
length across layers, hierarchical Transformers trade the sequence length for the embedding dimension | def get_hierarchical_configuration(
layer_base_configs: List[BasicLayerConfig],
residual_norm_style: ResidualNormStyle = ResidualNormStyle.Pre,
use_rotary_embeddings: bool = True,
mlp_multiplier: int = 4,
in_channels: int = 3,
dim_head: Optional[int] = None,
):
"""
A small helper to generate hierarchical xformers configurations,
which correspond for instance to poolformer or swin architectures.
Contrary to more "classical" Transformer architectures, which conserve the sequence/context
length across layers, hierarchical Transformers trade the sequence length for the embedding dimension
"""
deprecated_function(get_hierarchical_configuration)
base_config: Dict[str, Any] = {
"block_type": "encoder",
"dim_model": 0,
"use_triton": False,
"residual_norm_style": str(residual_norm_style),
"multi_head_config": {
"num_heads": 1,
"use_rotary_embeddings": use_rotary_embeddings,
"attention": {
"name": "TBD",
},
},
"feedforward_config": {
"name": "TBD",
"activation": "gelu",
"hidden_layer_multiplier": mlp_multiplier,
"dropout": 0.0,
},
"position_encoding_config": {
"name": "learnable",
"seq_len": 0,
"add_class_token": False,
},
"patch_embedding_config": {
"in_channels": in_channels,
"kernel_size": 0,
"stride": 0,
"padding": 0,
},
}
xformers_config = []
in_channels = in_channels
for layer_base_config in layer_base_configs:
lc = copy.deepcopy(base_config)
lc["normalization"] = layer_base_config.normalization
# Fill in the changing model dimensions
lc["dim_model"] = layer_base_config.embedding
# Update the patches
lc["patch_embedding_config"] = {
"in_channels": in_channels,
"kernel_size": layer_base_config.patch_size,
"stride": layer_base_config.stride,
"padding": layer_base_config.padding,
}
# Update the number of channels for the next layer
in_channels = lc["dim_model"] * 1
lc["position_encoding_config"]["seq_len"] = layer_base_config.seq_len
# Fill in the number of heads (defaults to 1)
if dim_head is not None:
lc["multi_head_config"]["num_heads"] = (
layer_base_config.embedding // dim_head
)
assert layer_base_config.embedding % dim_head == 0
# Fill in the attention mechanism
lc["multi_head_config"]["attention"][
"name"
] = layer_base_config.attention_mechanism
# Fill in the feedforward
lc["feedforward_config"]["name"] = layer_base_config.feedforward
print(lc)
xformers_config.append(lc)
# Handle repeated layers (without the patch embeddings)
if layer_base_config.repeat_layer > 1:
lc_repeat = copy.deepcopy(lc)
lc_repeat.pop("patch_embedding_config")
xformers_config += [lc_repeat] * (layer_base_config.repeat_layer - 1)
return xformers_config |
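# Illustrative usage sketch (not from the source), assuming BasicLayerConfig is a
# dataclass whose fields match the attributes read above (embedding,
# attention_mechanism, patch_size, stride, padding, seq_len, feedforward,
# normalization, repeat_layer); the attention/feedforward names are examples only.
_image_size = 224
_base_configs = [
    BasicLayerConfig(
        embedding=64,
        attention_mechanism="pooling",
        patch_size=7,
        stride=4,
        padding=3,
        seq_len=(_image_size // 4) ** 2,
        feedforward="MLP",
        normalization="layernorm",
        repeat_layer=1,
    ),
    BasicLayerConfig(
        embedding=128,
        attention_mechanism="pooling",
        patch_size=3,
        stride=2,
        padding=1,
        seq_len=(_image_size // 8) ** 2,
        feedforward="MLP",
        normalization="layernorm",
        repeat_layer=1,
    ),
]
_configs = get_hierarchical_configuration(_base_configs, mlp_multiplier=4, dim_head=32)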
In-place scaling+index_add
Indices in ``index`` are assumed to be unique
The max index in ``index`` is assumed to be less than the size of dim0 of ``input``.
:Note:
The FW pass is done in-place (``input`` is modified)
:Equivalent pytorch code:
.. code-block:: python
return torch.index_add(input, dim=0, source=scaling * src, index=indices, alpha=alpha) | def scaled_index_add(
input: torch.Tensor, # [B, M, D]
index: torch.Tensor, # [Bi] - int64
source: torch.Tensor, # [Bi, M, D]
scaling: Optional[torch.Tensor] = None, # [D]
alpha: float = 1.0,
) -> torch.Tensor:
"""
In-place scaling+index_add
Indices in ``index`` are assumed to be unique
The max index in ``index`` is assumed to be less than the size of dim0 of ``input``.
:Note:
The FW pass is done in-place (``input`` is modified)
:Equivalent pytorch code:
.. code-block:: python
return torch.index_add(input, dim=0, source=scaling * src, index=indices, alpha=alpha)
"""
return _ScaledIndexAdd.apply(input, index, source, scaling, alpha) |
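# Minimal usage sketch (shapes illustrative; assumes a CUDA device). `inp` is
# modified in place and should match the torch.index_add reference above.
import torch

B, M, D = 8, 16, 32
inp = torch.randn(B, M, D, device="cuda", dtype=torch.float16)
idx = torch.tensor([0, 3, 5], dtype=torch.int64, device="cuda")
src = torch.randn(3, M, D, device="cuda", dtype=torch.float16)
gamma = torch.randn(D, device="cuda", dtype=torch.float16)

ref = torch.index_add(inp.clone(), dim=0, index=idx, source=gamma * src, alpha=2.0)
out = scaled_index_add(inp, idx, src, scaling=gamma, alpha=2.0)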
Indices in ``index`` are assumed to be unique
In each (index, source) pair, the max index in ``index`` is assumed to be less than the size of dim0 of ``source``
:Example:
Given:
- ``sources[0]`` of shape ``[S0, D0]``
- ``indices[0]`` of shape ``[I0]``
- ``sources[1]`` of shape ``[S1, D1]``
- ``indices[1]`` of shape ``[I1]``
returns a ``torch.Tensor`` of shape ``[I0 * D0 + I1 * D1]``
:Equivalent pytorch code:
.. code-block:: python
return torch.cat([s[i.long()].flatten() for s, i in zip(sources, indices)], dim=0) | def index_select_cat(
sources: Sequence[torch.Tensor], indices: Sequence[torch.Tensor]
) -> torch.Tensor:
"""
Indices in ``index`` are assumed to be unique
In each (index, source) pair, the max index in ``index`` is assumed to be less than the size of dim0 of ``source``
:Example:
Given:
- ``sources[0]`` of shape ``[S0, D0]``
- ``indices[0]`` of shape ``[I0]``
- ``sources[1]`` of shape ``[S1, D1]``
- ``indices[1]`` of shape ``[I1]``
returns a ``torch.Tensor`` of shape ``[I0 * D0 + I1 * D1]``
:Equivalent pytorch code:
.. code-block:: python
return torch.cat([s[i.long()].flatten() for s, i in zip(sources, indices)], dim=0)
"""
return _IndexSelectCat.apply(*sources, *indices) |
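# Minimal usage sketch (shapes illustrative; assumes a CUDA device).
import torch

s0 = torch.randn(10, 4, device="cuda", dtype=torch.float16)
s1 = torch.randn(6, 8, device="cuda", dtype=torch.float16)
i0 = torch.tensor([1, 7, 2], dtype=torch.int64, device="cuda")
i1 = torch.tensor([0, 5], dtype=torch.int64, device="cuda")

ref = torch.cat([s[i].flatten() for s, i in zip((s0, s1), (i0, i1))], dim=0)
out = index_select_cat([s0, s1], [i0, i1])  # shape [3 * 4 + 2 * 8] == [28]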
Initializes pipes between processes of a `ProcessGroup`, that can be used
to exchange `torch.Tensor` later | def init_ipc(
group: dist.ProcessGroup,
device: Union[torch.device, str] = "cuda",
) -> List[Optional[IPCPipe]]:
"""
Initializes pipes between processes of a `ProcessGroup`, that can be used
to exchange `torch.Tensor` later
"""
if isinstance(device, str):
device = torch.device(device)
if device.index is None:
device = torch.device(device.type, index=torch.cuda.current_device())
world_size = group.size()
my_rank = group.rank()
# Open connections to all other processes. We exchange addresses via
# NCCL since we don't have access to a Store.
listeners = [
multiprocessing.connection.Listener(family="AF_UNIX", address="", backlog=1)
for _ in range(world_size)
]
# If any process is late, all other ones will block here
all_addresses = _exchange_addresses(listeners, group, device)
connections: Any = []
for other_rank in range(world_size):
# For p2p connection between ranks i<->j
# if `i<j`, `i` listens, and `j` connects
if my_rank < other_rank: # `other` connects to me
connections.append(listeners[other_rank].accept())
elif other_rank == my_rank:
connections.append(None)
else:
connections.append(
multiprocessing.connection.Client(
family="AF_UNIX",
# Mypy wants it to be str, but it actually can also be bytes
# https://github.com/python/typeshed/issues/10054
address=all_addresses[other_rank][my_rank],
)
)
return [
IPCPipe(connection, my_device=device) if connection is not None else None
for connection in connections
] |
RMS Normalization along the last dimension.
This is similar to torch.nn.functional.normalize but with eps being added
instead of max.
Expects x contiguous of shape (..., dim), and returns normalized data
of the same shape. For each dim-length vector x, the result has
x / sqrt( x*x.sum() + eps)
If weights are included, they are a contiguous parameter of length dim
which multiplies the result.
This functionality is experimental. Its API might be changed without warnings.
Use it at your own risk. | def rms_norm(x, weight: Optional[torch.Tensor], eps: float = 1e-6):
"""
RMS Normalization along the last dimension.
This is similar to torch.nn.functional.normalize but with eps being added
instead of max.
Expects x contiguous of shape (..., dim), and returns normalized data
of the same shape. For each dim-length vector x, the result has
x / sqrt( x*x.sum() + eps)
If weights are included, they are a contiguous parameter of length dim
which multiplies the result.
This functionality is experimental. Its API might be changed without warnings.
Use it at your own risk.
"""
assert _is_triton_available()
from ._triton.rmsnorm_kernels import _rms_norm_forward
if torch.is_grad_enabled() and (
x.requires_grad or (weight is not None and weight.requires_grad)
):
raise ValueError("Gradients not supported.")
return _rms_norm_forward(x, weight, eps) |
An addition fused with rms_norm.
z = rms_norm_add(x, y, weight, eps)
is equivalent to
x += y
z = rms_norm(x, weight, eps)
where x, y and z are all contiguous.
This functionality is experimental. Its API might be changed without warnings.
Use it at your own risk. | def rms_norm_add(
x: torch.Tensor, y: torch.Tensor, weight: Optional[torch.Tensor], eps: float = 1e-6
):
"""
An addition fused with rms_norm.
z = rms_norm_add(x, y, weight, eps)
is equivalent to
x += y
z = rms_norm(x, weight, eps)
where x, y and z are all contiguous.
This functionality is experimental. Its API might be changed without warnings.
Use it at your own risk.
"""
if torch.is_grad_enabled() and (
x.requires_grad
or y.requires_grad
or (weight is not None and weight.requires_grad)
):
raise ValueError("Gradients not supported.")
assert _is_triton_available()
from ._triton.rmsnorm_kernels import _rms_norm_add_forward
return _rms_norm_add_forward(x, y, weight, eps) |
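# Minimal usage sketch (assumes a CUDA device with Triton available). `x` is
# updated to `x + y` in place, and `z` matches rms_norm(x + y, w) up to numerics.
import torch

x = torch.randn(4, 1024, device="cuda", dtype=torch.bfloat16)
y = torch.randn(4, 1024, device="cuda", dtype=torch.bfloat16)
w = torch.randn(1024, device="cuda", dtype=torch.bfloat16)

z = rms_norm_add(x, y, w, eps=1e-6)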
Performs RoPE (rotary embeddings) and kv-cache emplacement for a heterogeneous
batch for inference in the style given by
BlockDiagonalCausalWithOffsetPaddedKeysMask.
The batch is concatenated along the sequence dimension, so the
actual dim-0 length of all tensors is 1.
xq, xk and xv should be (1, slen, n_heads, dim), where
xq's n_heads can differ from xk and xv.
This function places the roped xk in the right place in cache_k, and
xv (unmodified) in the right place in cache_v, and returns out_q
(the roped xq) such that things are ready to call
xformers.ops.memory_efficient_attention(
out_q, cache_k, cache_v, attn_bias=attn_bias
)
This functionality is experimental. Its API might be changed without warnings.
Use it at your own risk.
Arguments:
xq: tensor of queries to apply rope to
xk: tensor of keys to apply rope to
xv: tensor of values to copy into cache_v
cache_k: cache of keys, MODIFIED IN PLACE
cache_v: cache of values, MODIFIED IN PLACE
attn_bias: details the layout of caches.
Used to determine frequencies for the
RoPE calculation as well as the locations in cache_k and cache_v
to write to. Must be on the device.
first_seqpos: Optionally a tensor containing the sequence position of the
beginning of the cache for each batch element.
Providing a tensor of zeros is the same as providing None.
This affects the numerical calculation but not which memory
locations are read or written.
seqpos: Optionally a 1D tensor containing the sequence position of each
query. This should have length equal to xq.shape[1] .
This affects the numerical calculation but not which memory
locations are read or written.
adjacents: If True, the inputs are in adjacent pairs along the final dim axis.
This is like the released LLaMA model.
If False, the dim axis is split in two equal pieces.
I.e. the features are ordered with all the real parts before all
the imaginary parts. This matches HuggingFace, e.g.
https://github.com/huggingface/transformers/blob/
f143037789288ba532dada934a118e648e715738/
src/transformers/models/llama/modeling_llama.py#L126-L130
internal_dtype: set to "f32" or "f64" to enforce dtype in the calculation | def rope_padded(
xq: torch.Tensor,
xk: torch.Tensor,
xv: torch.Tensor,
cache_k: torch.Tensor,
cache_v: torch.Tensor,
attn_bias: BlockDiagonalCausalWithOffsetPaddedKeysMask,
*,
theta: float = 10000.0,
out_q: Optional[torch.Tensor] = None,
first_seqpos: Optional[torch.Tensor] = None,
seqpos: Optional[torch.Tensor] = None,
adjacents: bool = True,
internal_dtype: str = "",
):
"""
Performs RoPE (rotary embeddings) and kv-cache emplacement for a heterogeneous
batch for inference in the style given by
BlockDiagonalCausalWithOffsetPaddedKeysMask.
The batch is concatenated along the sequence dimension, so the
actual dim-0 length of all tensors is 1.
xq, xk and xv should be (1, slen, n_heads, dim), where
xq's n_heads can differ from xk and xv.
This function places the roped xk in the right place in cache_k, and
xv (unmodified) in the right place in cache_v, and returns out_q
(the roped xq) such that things are ready to call
xformers.ops.memory_efficient_attention(
out_q, cache_k, cache_v, attn_bias=attn_bias
)
This functionality is experimental. Its API might be changed without warnings.
Use it at your own risk.
Arguments:
xq: tensor of queries to apply rope to
xk: tensor of keys to apply rope to
xv: tensor of values to copy into cache_v
cache_k: cache of keys, MODIFIED IN PLACE
cache_v: cache of values, MODIFIED IN PLACE
attn_bias: details the layout of caches.
Used to determine frequencies for the
RoPE calculation as well as the locations in cache_k and cache_v
to write to. Must be on the device.
first_seqpos: Optionally a tensor containing the sequence position of the
beginning of the cache for each batch element.
Providing a tensor of zeros is the same as providing None.
This affects the numerical calculation but not which memory
locations are read or written.
seqpos: Optionally a 1D tensor containing the sequence position of each
query. This should have length equal to xq.shape[1] .
This affects the numerical calculation but not which memory
locations are read or written.
adjacents: If True, the inputs are in adjacent pairs along the final dim axis.
This is like the released LLaMA model.
If False, the dim axis is split in two equal pieces.
I.e. the features are ordered with all the real parts before all
the imaginary parts. This matches HuggingFace, e.g.
https://github.com/huggingface/transformers/blob/
f143037789288ba532dada934a118e648e715738/
src/transformers/models/llama/modeling_llama.py#L126-L130
internal_dtype: set to "f32" or "f64" to enforce dtype in the calculation
"""
if torch.is_grad_enabled() and (
xq.requires_grad
or xk.requires_grad
or xv.requires_grad
or cache_k.requires_grad
or cache_v.requires_grad
or out_q is not None
):
raise ValueError("Gradients not supported.")
assert _is_triton_available()
import triton
from ._triton.rope_padded_kernels import _rope_padded_kernel
n_total_queries = attn_bias.q_seqinfo.seqstart_py[-1]
cache_length = attn_bias.k_seqinfo.seqstart_py[-1]
ndim = xq.ndim
if ndim not in [4, 5]:
raise ValueError("Unexpected xq dimension")
xq_stride = xq.stride()
xk_stride = xk.stride()
xv_stride = xv.stride()
cache_k_stride = cache_k.stride()
cache_v_stride = cache_v.stride()
cache_k_shape = cache_k.shape
xk_shape = xk.shape
n_kv_heads = xk_shape[-2]
expected_kv_heads = n_kv_heads
if xk_stride[-2] == 0:
n_kv_heads = 1
expected_cache_heads = n_kv_heads
if n_kv_heads == 1 and cache_k_stride[-2] == 0:
# If there's 1 kv head, don't care how expanded
# cache_k is. User might expand before or after rope.
expected_cache_heads = cache_k_shape[-2]
if ndim == 4:
bsz, q_len, n_q_heads, dim = xq.shape
assert q_len == n_total_queries
if xk_shape != (1, n_total_queries, expected_kv_heads, dim):
raise ValueError(
f"unexpected k shape {xk_shape}: expected {(1, n_total_queries, expected_kv_heads, dim)}"
)
if xv.shape != (1, n_total_queries, expected_kv_heads, dim):
raise ValueError(
f"unexpected v shape {xv.shape}: expected {(1, n_total_queries, expected_kv_heads, dim)}"
)
if cache_k_shape != (1, cache_length, expected_cache_heads, dim):
raise ValueError("unexpected cache_k shape")
if cache_v.shape != (1, cache_length, expected_cache_heads, dim):
raise ValueError("unexpected cache_v shape")
n_groups = 1
out_q_stride: Tuple[int, ...] = (0, n_q_heads * dim, dim, 1)
else:
bsz, q_len, n_groups, n_q_heads, dim = xq.shape
assert q_len == n_total_queries
if xk_shape != (1, n_total_queries, n_groups, expected_kv_heads, dim):
raise ValueError(
f"unexpected k shape {xk_shape}: expected {(1, n_total_queries, n_groups, expected_kv_heads, dim)}"
)
if xv.shape != (1, n_total_queries, n_groups, expected_kv_heads, dim):
raise ValueError(
f"unexpected v shape {xv.shape}: expected {(1, n_total_queries, n_groups, expected_kv_heads, dim)}"
)
if cache_k_shape != (1, cache_length, n_groups, expected_cache_heads, dim):
raise ValueError(
f"unexpected cache_k shape {cache_k_shape}: "
f"expected {(1, cache_length, n_groups, expected_cache_heads, dim)}"
)
if cache_v.shape != (1, cache_length, n_groups, expected_cache_heads, dim):
raise ValueError(
f"unexpected cache_v shape {cache_v.shape}: "
f"expected {(1, cache_length, n_groups, expected_cache_heads, dim)}"
)
out_q_stride = (
0,
n_q_heads * dim * n_groups,
n_q_heads * dim,
dim,
1,
)
if bsz != 1:
raise ValueError(
"Expected batch size dimension to be 1 as batches should be concatenated."
)
if xq_stride[-1] != 1:
raise ValueError("Each q head must be contiguous")
if xk_stride[-1] != 1:
raise ValueError("Each k head must be contiguous")
if xv_stride[-1] != 1:
raise ValueError("Each v head must be contiguous")
if cache_k_stride[-1] != 1:
raise ValueError("Each cache_k head must be contiguous")
if cache_v_stride[-1] != 1:
raise ValueError("Each cache_v head must be contiguous")
n_total_heads = n_q_heads + 2 * n_kv_heads
v_start = n_total_heads - n_kv_heads
k_start = n_q_heads
if out_q is None:
out_q = xq.new_empty(xq.shape)
else:
if out_q.shape != xq.shape:
raise ValueError("Unexpected shape of out_q")
out_q_stride = out_q.stride()
if out_q_stride[-1] != 1:
raise ValueError("Each out_q head must be contiguous")
assert out_q is not None
logical_bsz = len(attn_bias.q_seqinfo.seqstart_py) - 1
if first_seqpos is not None and seqpos is not None:
raise ValueError("seqpos and first_seqpos may not both be provided")
stride_seqpos = 0
if first_seqpos is not None:
if first_seqpos.shape != (logical_bsz,):
shape = tuple(first_seqpos.shape)
raise ValueError(
f"first_seqpos.shape {shape} but ({logical_bsz},) expected."
)
stride_seqpos = first_seqpos.stride(0)
elif seqpos is not None:
if seqpos.shape != (n_total_queries,):
shape = tuple(seqpos.shape)
raise ValueError(f"seqpos.shape {shape} but ({n_total_queries},) expected.")
stride_seqpos = seqpos.stride(0)
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // xq.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(dim))
BLOCK_SIZE = max(BLOCK_SIZE, 128)
BLOCK_SIZE = min(BLOCK_SIZE, 4096)
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
device = xq.device
# Move these to the right device, like fmha does.
attn_bias.k_seqinfo.to(device)
attn_bias.q_seqinfo.to(device)
seqstartq = attn_bias.q_seqinfo.seqstart
seqstartk = attn_bias.k_seqinfo.seqstart
seqlenk = attn_bias.k_seqinfo.seqlen
assert internal_dtype in ["", "f32", "f64"]
# experiment with the order of dims here.
with torch.cuda.device(xq.device):
_rope_padded_kernel[
(attn_bias.q_seqinfo.max_seqlen, logical_bsz, n_total_heads * n_groups)
](
xq,
xk,
xv,
out_q,
cache_k,
cache_v,
seqstartq,
seqstartk,
seqlenk,
theta,
first_seqpos,
seqpos,
k_start,
v_start,
n_groups,
dim,
xq_stride[1],
xq_stride[2] if ndim == 5 else 0,
xq_stride[-2],
xk_stride[1],
xk_stride[2] if ndim == 5 else 0,
xk_stride[-2],
xv_stride[1],
xv_stride[2] if ndim == 5 else 0,
xv_stride[-2],
cache_k_stride[1],
cache_k_stride[2] if ndim == 5 else 0,
cache_k_stride[-2],
cache_v_stride[1],
cache_v_stride[2] if ndim == 5 else 0,
cache_v_stride[-2],
seqstartq.stride(0),
seqstartk.stride(0),
seqlenk.stride(0),
out_q_stride[1],
out_q_stride[2] if ndim == 5 else 0,
out_q_stride[-2],
stride_seqpos,
internal_dtype,
const_batch_strides=False,
cache_padding_length=0,
seqlenk_shift=0,
BLOCK_SIZE=BLOCK_SIZE,
adjacents=adjacents,
num_warps=num_warps,
)
return out_q |
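# Decode-style usage sketch (not from the source). It assumes the
# BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(q_seqlen, kv_padding,
# kv_seqlen) constructor; heads, dims and lengths below are illustrative.
import torch
from xformers.ops import memory_efficient_attention
from xformers.ops.fmha.attn_bias import BlockDiagonalCausalWithOffsetPaddedKeysMask

n_heads, n_kv_heads, dim, padding = 8, 8, 128, 64
attn_bias = BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(
    q_seqlen=[1, 1], kv_padding=padding, kv_seqlen=[10, 23]
)
kw = dict(device="cuda", dtype=torch.bfloat16)
xq = torch.randn(1, 2, n_heads, dim, **kw)
xk = torch.randn(1, 2, n_kv_heads, dim, **kw)
xv = torch.randn(1, 2, n_kv_heads, dim, **kw)
cache_k = torch.zeros(1, 2 * padding, n_kv_heads, dim, **kw)
cache_v = torch.zeros_like(cache_k)

out_q = rope_padded(xq, xk, xv, cache_k, cache_v, attn_bias)
attn = memory_efficient_attention(out_q, cache_k, cache_v, attn_bias=attn_bias)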
Performs a fused all-gather followed by a linear op
It is equivalent to the following plain PyTorch code:
# like scattered_input but with first dim multiplied by group's world size
gathered_input = scattered_input.new_empty(...)
dist.all_gather_into_tensor(gathered_input, scattered_input, group=group)
return torch.nn.functional.linear(gathered_input, weight)
It achieves this by breaking down the matmul into smaller partial ops (as
many as the world size), each needing as input a different "contribution"
to the all-gather (by a different rank), and writing to a different chunk of
the output. Then, on one stream, it sends the local contribution to all
other ranks (first one rank over, then two, ...) while, on another stream,
it launches the sub-matmuls in the order in which the remote contributions
(which are the sub-matmuls' inputs) are supposed to arrive, so that ideally
none of the sub-matmuls will ever have to wait.
The idea comes from this paper: https://arxiv.org/abs/2302.05442
This method uses a staging buffer, which persists across calls, of the same
size as the all-gathered input tensor (i.e., the input's size times the
world size). If multiple inputs of multiple sizes are used, the staging
buffer will be the maximum needed by any of them. Each call, when it starts,
must first wait for the previous call to finish using the staging buffer. In
normal conditions, where there's some other operation between two calls,
this isn't an issue. However, when doing back-to-back calls (like in
benchmarks) it can introduce artificial delays. To hide them, we allow using
more than one staging buffer, which will be cycled through, thus trading
memory for speed. This can be controlled using the num_stripes argument.
Supports FP8 gemm for tensor-wise quantized weight and input tensors.
To enable FP8 gemm:
1. pass scattered_input and weight as quantized FP8 datatype
2. pass scale_scattered_input and scale_weight, the scales used to
quantize input and weight, respectively.
3. set out_dtype, if not specified, will be inferred from scattered_input type. | def fused_allgather_and_linear(
scattered_input: torch.Tensor,
weight: Union[torch.Tensor, List[torch.Tensor]],
*,
group: dist.ProcessGroup,
out: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
num_stripes: int = 1,
timeout_s: int = 60 * 60,
scale_scattered_input: Optional[torch.Tensor] = None,
scale_weight: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
out_dtype: Optional[torch.dtype] = None,
**private_args_DO_NOT_USE,
) -> Union[torch.Tensor, List[torch.Tensor]]:
"""Performs a fused all-gather followed by a linear op
It is equivalent to the following plain PyTorch code:
# like scattered_input but with first dim multiplied by group's world size
gathered_input = scattered_input.new_empty(...)
dist.all_gather_into_tensor(gathered_input, scattered_input, group=group)
return torch.nn.functional.linear(gathered_input, weight)
It achieves this by breaking down the matmul into smaller partial ops (as
many as the world size), each needing as input a different "contribution"
to the all-gather (by a different rank), and writing to a different chunk of
the output. Then, on one stream, it sends the local contribution to all
other ranks (first one rank over, then two, ...) while, on another stream,
it launches the sub-matmuls in the order in which the remote contributions
(which are the sub-matmuls' inputs) are supposed to arrive, so that ideally
none of the sub-matmuls will ever have to wait.
The idea comes from this paper: https://arxiv.org/abs/2302.05442
This method uses a staging buffer, which persists across calls, of the same
size as the all-gathered input tensor (i.e., the input's size times the
world size). If multiple inputs of multiple sizes are used, the staging
buffer will be the maximum needed by any of them. Each call, when it starts,
must first wait for the previous call to finish using the staging buffer. In
normal conditions, where there's some other operation between two calls,
this isn't an issue. However, when doing back-to-back calls (like in
benchmarks) it can introduce artificial delays. To hide them, we allow using
more than one staging buffer, which will be cycled through, thus trading
memory for speed. This can be controlled using the num_stripes argument.
Supports FP8 gemm for tensor-wise quantized weight and input tensors.
To enable FP8 gemm:
1. pass scattered_input and weight as quantized FP8 datatype
2. pass scale_scattered_input and scale_weight, the scales used to
quantize input and weight, respectively.
3. set out_dtype, if not specified, will be inferred from scattered_input type.
"""
world_size = group.size()
weights = weight if isinstance(weight, list) else [weight]
assert (scale_scattered_input is None) == (scale_weight is None)
if scale_weight is not None:
assert isinstance(weight, list) == isinstance(scale_weight, list)
scales_weights = (
scale_weight if isinstance(scale_weight, list) else [scale_weight]
)
assert len(weights) == len(scales_weights)
assert out_dtype is not None, "out_dtype is required with FP8"
else:
scales_weights = [torch.empty(1)] * len(weights)
assert all(w.ndim == 2 for w in weights)
assert scattered_input.ndim >= 2
assert all(scattered_input.shape[-1] == w.shape[-1] for w in weights)
assert scattered_input.is_contiguous()
gathered_input_shape = (world_size,) + scattered_input.shape
gathered_output_shapes = [gathered_input_shape[:-1] + w.shape[:-1] for w in weights]
if out is not None:
assert isinstance(out, list) == isinstance(weight, list)
gathered_outputs = out if isinstance(out, list) else [out]
assert len(gathered_outputs) == len(gathered_output_shapes)
assert all(
go.shape == gos for go, gos in zip(gathered_outputs, gathered_output_shapes)
)
assert all(go.is_contiguous() for go in gathered_outputs)
if out_dtype is not None:
if isinstance(out, list):
for o in out:
assert o.dtype == out_dtype
else:
assert out.dtype == out_dtype
else:
gathered_outputs = [
scattered_input.new_empty(
gos,
dtype=out_dtype if out_dtype is not None else scattered_input.dtype,
)
for gos in gathered_output_shapes
]
def my_matmul(
inputs: List[torch.Tensor],
src_rank: int,
stream_factory: Callable[[], torch.cuda.Stream],
) -> None:
for w, scale_weight, go in zip(weights, scales_weights, gathered_outputs):
with torch.cuda.stream(stream_factory()):
if _is_fp8_dtype(w.dtype):
output_amax = torch.empty(1, dtype=torch.float32, device=w.device)
torch._scaled_mm(
inputs[0],
w.t(),
out_dtype=go[src_rank].dtype,
scale_a=scale_scattered_input,
scale_b=scale_weight,
out=(go[src_rank], output_amax),
)
else:
torch.matmul(inputs[0], w.t(), out=go[src_rank])
_is_regular_matmul = all([not _is_fp8_dtype(w.dtype) for w in weights])
fused_allgather_and_anything(
[scattered_input],
my_matmul,
group=group,
num_stripes=num_stripes,
timeout_s=timeout_s,
_is_regular_matmul=_is_regular_matmul,
_extra_triton_args=dict(
bs=[w.t() for w in weights],
cs=[go.flatten(0, -2) for go in gathered_outputs],
cs_my_shard=None,
),
**private_args_DO_NOT_USE,
)
if isinstance(weight, list):
return [go.flatten(0, 1) for go in gathered_outputs]
else:
return gathered_outputs[0].flatten(0, 1) |
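# Distributed usage sketch (not from the source): assumes torch.distributed is
# initialized with a NCCL backend, one GPU per rank, and every rank calls this
# collectively; shapes are illustrative.
import torch
import torch.distributed as dist

group = dist.group.WORLD
scattered_input = torch.randn(128, 1024, device="cuda", dtype=torch.bfloat16)
weight = torch.randn(4096, 1024, device="cuda", dtype=torch.bfloat16)

out = fused_allgather_and_linear(scattered_input, weight, group=group)
# out.shape == (128 * group.size(), 4096), matching all_gather_into_tensor + F.linear.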
Performs a fused linear op followed by a reduce-scatter
It is equivalent to the following plain PyTorch code:
gathered_output = torch.nn.functional.linear(gathered_input, weight)
# like gathered_output but with first dim divided by group's world size
scattered_output = gathered_output.new_empty(...)
dist.reduce_scatter_tensor(scattered_output, gathered_output, group=group)
Supports FP8 gemm with tensor-wise quantized weights. To enable FP8 gemm:
1. pass weight and gathered_input as FP8 tensors
2. Set `scale_gathered_input` and `scale_weight` to the scales used to quantize
inputs and weight, respectively.
3. Set out_dtype to the desired output dtype. If not specified, it will be inferred from
gathered_input datatype. | def fused_linear_and_reducescatter(
gathered_input: torch.Tensor,
weight: Union[torch.Tensor, List[torch.Tensor]],
*,
group: dist.ProcessGroup,
out: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
num_stripes: int = 1,
timeout_s: int = 60 * 60,
scale_gathered_input: Optional[torch.Tensor] = None,
scale_weight: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
out_dtype: Optional[torch.dtype] = None,
**private_args_DO_NOT_USE,
) -> Union[torch.Tensor, List[torch.Tensor]]:
"""Performs a fused linear op followed by a reduce-scatter
It is equivalent to the following plain PyTorch code:
gathered_output = torch.nn.functional.linear(gathered_input, weight)
# like gathered_output but with first dim divided by group's world size
scattered_output = gathered_output.new_empty(...)
dist.reduce_scatter_tensor(scattered_output, gathered_output, group=group)
Supports FP8 gemm with tensor-wise quantized weights. To enable FP8 gemm:
1. pass weight and gathered_input as FP8 tensors
2. Set `scale_gathered_input` and `scale_weight` to the scales used to quantize
inputs and weight, respectively.
3. Set out_dtype to the desired output dtype. If not specified, it will be inferred from
gathered_input datatype.
"""
world_size = group.size()
weights = weight if isinstance(weight, list) else [weight]
assert (scale_gathered_input is None) == (scale_weight is None)
if scale_weight is not None:
assert isinstance(weight, list) == isinstance(scale_weight, list)
scales_weights = (
scale_weight if isinstance(scale_weight, list) else [scale_weight]
)
assert len(weights) == len(scales_weights)
assert out_dtype is not None, "out_dtype is required with FP8"
else:
scales_weights = [torch.empty(1)] * len(weights)
assert all(w.ndim == 2 for w in weights)
assert gathered_input.ndim >= 2
assert all(gathered_input.shape[-1] == w.shape[-1] for w in weights)
assert gathered_input.is_contiguous()
assert gathered_input.shape[0] % world_size == 0
gathered_input = gathered_input.view(
(world_size, gathered_input.shape[0] // world_size) + gathered_input.shape[1:]
)
gathered_output_shapes = [gathered_input.shape[:-1] + w.shape[:-1] for w in weights]
scattered_output_shapes = [gos[1:] for gos in gathered_output_shapes]
if out is not None:
assert isinstance(out, list) == isinstance(weight, list)
scattered_outputs = out if isinstance(out, list) else [out]
assert len(scattered_outputs) == len(scattered_output_shapes)
assert all(so.device == gathered_input.device for so in scattered_outputs)
assert all(so.dtype == gathered_input.dtype for so in scattered_outputs)
assert all(
so.shape == sos
for so, sos in zip(scattered_outputs, scattered_output_shapes)
)
if out_dtype is not None:
if isinstance(out, list):
for o in out:
assert o.dtype == out_dtype
else:
assert out.dtype == out_dtype
else:
scattered_outputs = [
gathered_input.new_empty(
sos,
dtype=out_dtype if out_dtype is not None else gathered_input.dtype,
)
for sos in scattered_output_shapes
]
def my_matmul(
outputs: List[torch.Tensor],
dst_rank: int,
stream_factory: Callable[[], torch.cuda.Stream],
) -> None:
for w, scale_weight, o in zip(weights, scales_weights, outputs):
with torch.cuda.stream(stream_factory()):
if _is_fp8_dtype(w.dtype):
output_amax = torch.empty(1, dtype=torch.float32, device=o.device)
torch._scaled_mm(
gathered_input[dst_rank],
w.t(),
out_dtype=o.dtype,
scale_a=scale_gathered_input,
scale_b=scale_weight,
out=(o, output_amax),
)
else:
torch.matmul(gathered_input[dst_rank], w.t(), out=o)
_is_regular_matmul = all([not _is_fp8_dtype(w.dtype) for w in weights])
fused_anything_and_reducescatter(
my_matmul,
scattered_outputs,
group=group,
num_stripes=num_stripes,
timeout_s=timeout_s,
_is_regular_matmul=_is_regular_matmul,
_extra_triton_args=dict(
a_my_shard=None,
a=gathered_input.flatten(0, -2),
bs=[w.t() for w in weights],
),
**private_args_DO_NOT_USE,
)
if isinstance(weight, list):
return scattered_outputs
else:
return scattered_outputs[0] |
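# Mirror sketch for the reduce-scatter direction (same assumptions: initialized
# NCCL group, one GPU per rank, called collectively; shapes illustrative).
import torch
import torch.distributed as dist

group = dist.group.WORLD
gathered_input = torch.randn(128 * group.size(), 1024, device="cuda", dtype=torch.bfloat16)
weight = torch.randn(4096, 1024, device="cuda", dtype=torch.bfloat16)

out = fused_linear_and_reducescatter(gathered_input, weight, group=group)
# out.shape == (128, 4096), matching F.linear + reduce_scatter_tensor.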
Returns the version of the cusparselt.so library that ships with pytorch 2.2+ | def _get_cusparselt_torch_version() -> Tuple[int, int, int]:
"""
Returns the version of the cusparselt.so library that ships with pytorch 2.2+
"""
lib_path = _get_cusparselt_lib()
if lib_path is None:
return (0, 0, 0)
lib = ctypes.CDLL(lib_path)
def get_version_part(version_part: int) -> int:
value = ctypes.c_int()
ret = lib.cusparseLtGetProperty(version_part, ctypes.byref(value))
if ret != 0:
return -1
return value.value
return (get_version_part(0), get_version_part(1), get_version_part(2)) |
Computes a SwiGLU block given the weights/bias of the 3
linear layers.
- It is recommended to keep ``op=None`` so the best implementation available for the inputs will be used.
:Equivalent pytorch code:
.. code-block:: python
x1 = F.linear(x, w1, b1)
x2 = F.linear(x, w2, b2)
hidden = F.silu(x1) * x2
return F.linear(hidden, w3, b3)
:Packing weights:
To allow faster implementations, it's recommended to have w1/w2 come from the same storage, as in:
.. code-block:: python
w1, w2 = xformers.ops.unbind(w12, 0)
:Supported hardware:
This operator is only optimized on A100+ on ``torch.half`` or ``torch.bfloat16`` (autocast is supported), and will fallback to a functional pytorch implementation otherwise. | def swiglu(
x: torch.Tensor,
w1: torch.Tensor,
b1: Optional[torch.Tensor],
w2: torch.Tensor,
b2: Optional[torch.Tensor],
w3: torch.Tensor,
b3: Optional[torch.Tensor],
*,
op: Optional[SwiGLUOp] = None,
) -> torch.Tensor:
"""
Computes a SwiGLU block given the weights/bias of the 3
linear layers.
- It is recommended to keep ``op=None`` so the best implementation \
available for the inputs will be used.
:Equivalent pytorch code:
.. code-block:: python
x1 = F.linear(x, w1, b1)
x2 = F.linear(x, w2, b2)
hidden = F.silu(x1) * x2
return F.linear(hidden, w3, b3)
:Packing weights:
To allow faster implementations, it's recommended to have w1/w2 come from the same storage, as in:
.. code-block:: python
w1, w2 = xformers.ops.unbind(w12, 0)
:Supported hardware:
This operator is only optimized on A100+ on ``torch.half`` or ``torch.bfloat16`` \
(autocast is supported), and will fallback to a functional pytorch \
implementation otherwise.
"""
batch_shape = x.shape[:-1]
x = x.reshape([-1, x.shape[-1]])
if w1.ndim != 2 or w1.shape != w2.shape:
raise ValueError(f"Invalid shapes for w1: {w1.shape} / w2: {w2.shape}")
if b1 is not None:
if b1.ndim != 1 or b1.shape[0] != w1.shape[0]:
raise ValueError(f"Invalid shapes for b1: {b1.shape}")
if b2 is not None:
if b2.ndim != 1 or b2.shape[0] != w2.shape[0]:
raise ValueError(f"Invalid shapes for b2: {b2.shape}")
if w3.ndim != 2 or w3.shape[1] != w2.shape[0]:
raise ValueError(f"Invalid shape for w3: {w3.shape}")
if b3 is not None:
if b3.ndim != 1 or b3.shape[0] != w3.shape[0]:
raise ValueError(f"Invalid shapes for w3: {w3.shape} / b3: {b3.shape}")
if op is None:
op = SwiGLUOpDispatch.from_arguments(x, w1, b1, w2, b2, w3, b3).op
if not op.PACKED_WEIGHTS:
return op(x, w1, b1, w2, b2, w3, b3).reshape([*batch_shape, -1])
w1w2 = stack_or_none((w1, w2), dim=0)
if b1 is not None and b2 is not None:
b1b2: Optional[torch.Tensor] = stack_or_none((b1, b2), dim=0)
if b1b2 is None:
raise NotImplementedError("b1/b2 needs to be properly packed")
else:
b1b2 = None
assert b1 is None and b2 is None
if w1w2 is None:
raise NotImplementedError("w1/w2 needs to be properly packed")
return op(x, w1w2, b1b2, w3, b3).reshape([*batch_shape, -1]) |
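# Minimal usage sketch checking against the reference formula above (shapes
# illustrative; w1/w2 are taken from one storage, as recommended for packing).
import torch
import torch.nn.functional as F

B, D, H = 4, 256, 512
x = torch.randn(B, D, device="cuda", dtype=torch.float16)
w1w2 = torch.randn(2, H, D, device="cuda", dtype=torch.float16)
w1, w2 = w1w2.unbind(0)
w3 = torch.randn(D, H, device="cuda", dtype=torch.float16)

ref = F.linear(F.silu(F.linear(x, w1)) * F.linear(x, w2), w3)
out = swiglu(x, w1, None, w2, None, w3, None)
# `out` should match `ref` up to numerical tolerance.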
Computes a SwiGLU block given the weights/bias of the 3
linear layers.
:Equivalent pytorch code:
.. code-block:: python
x1 = F.linear(x, w1, b1)
x2 = F.linear(x, w2, b2)
hidden = F.silu(x1) * x2
return F.linear(hidden, w3, b3)
:Supported hardware:
This operator is only optimized on A100+ on ``torch.half`` or ``torch.bfloat16`` (autocast is supported), and will fallback to a functional pytorch implementation otherwise. | def swiglu_packed(
x: torch.Tensor,
w1w2: torch.Tensor,
b1b2: Optional[torch.Tensor],
w3: torch.Tensor,
b3: Optional[torch.Tensor],
*,
op: SwiGLUOp,
) -> torch.Tensor:
"""
Computes a SwiGLU block given the weights/bias of the 3
linear layers.
:Equivalent pytorch code:
.. code-block:: python
x1 = F.linear(x, w1, b1)
x2 = F.linear(x, w2, b2)
hidden = F.silu(x1) * x2
return F.linear(hidden, w3, b3)
:Supported hardware:
This operator is only optimized on A100+ on ``torch.half`` or ``torch.bfloat16`` \
(autocast is supported), and will fallback to a functional pytorch \
implementation otherwise.
"""
batch_shape = x.shape[:-1]
x = x.reshape([-1, x.shape[-1]])
if b3 is not None:
if b3.ndim != 1 or b3.shape[0] != w3.shape[0]:
raise ValueError(f"Invalid shapes for w3: {w3.shape} / b3: {b3.shape}")
assert op.PACKED_WEIGHTS, "Not implemented PACKED_WEIGHTS"
return op(x, w1w2, b1b2, w3, b3).reshape([*batch_shape, -1]) |
Multiply two matrices given as grids of tiles
It performs the matmul between A and B, which are given as two-dimensional
grids of tiles (i.e., blocks), represented as lists of lists of tensors.
The output will itself be a matrix in such a form. Formally:
out[m][n] = sum(a[m][k] @ b[k][n] for k in range(...))
with the obvious constraints needed to make it work, in terms of number of
tiles and sizes of each tile.
The interest of this operator is to improve performance by avoiding wave
quantization effects when doing independent matrix multiplications in
series. Sometimes, when these matmuls have one operand in common, this can
also be addressed by concatenating the other operands into a single matrix,
and issuing a single matmul. However this isn't always possible (e.g., might
break the checkpoint format) and it's an anti-pattern, as it obscures the
logic (e.g., changing the modelling code out of performance reasons). This
tiled matmul performs the same computation as if the matrices were merged,
without merging them, simply through a smarter memory addressing scheme.
The tiled matmul is less generic than a grouped matmul, which can also help
with wave quantization, and doesn't need the matmuls to have the same lhs
or rhs operand. However, a grouped matmul will write the result of each
matmul to a separate output matrix, whereas the tiled matmul allows to add
them together into a single output. This is needed during the backward pass
of a linear layer, and it's the reason we wrote this instead of using a
grouped matmul.
The tiled matmul is implemented using a custom Triton kernel, which puts
constraints on the strides of the tiles. All rows of A must have the same
K stride, all columns of A must have the same M stride, and so on.
Currently the tiled matmul supports at most three tiles on each dimension,
although fewer can also be given. This is because we needed it to fuse the
query, key and value weights of an attention layer. This limit can be
increased if needed.
This operator is differentiable. | def tiled_matmul(
a: List[List[torch.Tensor]],
b: List[List[torch.Tensor]],
) -> List[List[torch.Tensor]]:
"""Multiply two matrices given as grids of tiles
It performs the matmul between A and B, which are given as two-dimensional
grids of tiles (i.e., blocks), represented as lists of lists of tensors.
The output will itself be a matrix in such a form. Formally:
out[m][n] = sum(a[m][k] @ b[k][n] for k in range(...))
with the obvious constraints needed to make it work, in terms of number of
tiles and sizes of each tile.
The interest of this operator is to improve performance by avoiding wave
quantization effects when doing independent matrix multiplications in
series. Sometimes, when these matmuls have one operand in common, this can
also be addressed by concatenating the other operands into a single matrix,
and issuing a single matmul. However this isn't always possible (e.g., might
break the checkpoint format) and it's an anti-pattern, as it obscures the
logic (e.g., changing the modelling code out of performance reasons). This
tiled matmul performs the same computation as if the matrices were merged,
without merging them, simply through a smarter memory addressing scheme.
The tiled matmul is less generic than a grouped matmul, which can also help
with wave quantization, and doesn't need the matmuls to have the same lhs
or rhs operand. However, a grouped matmul will write the result of each
matmul to a separate output matrix, whereas the tiled matmul allows to add
them together into a single output. This is needed during the backward pass
of a linear layer, and it's the reason we wrote this instead of using a
grouped matmul.
The tiled matmul is implemented using a custom Triton kernel, which puts
constraints on the strides of the tiles. All rows of A must have the same
K stride, all columns of A must have the same M stride, and so on.
Currently the tiled matmul supports at most three tiles on each dimension,
although fewer can also be given. This is because we needed it to fuse the
query, key and value weights of an attention layer. This limit can be
increased if needed.
This operator is differentiable.
"""
ab_tree_values, ab_tree_spec = tree_flatten((a, b))
c_tree_spec, *c_tree_values = _TiledMatmul.apply(ab_tree_spec, *ab_tree_values)
c = tree_unflatten(list(c_tree_values), c_tree_spec)
return c |
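# Minimal usage sketch (assumes a CUDA device; shapes illustrative): fusing three
# matmuls that share the same left operand, as in a fused Q/K/V projection.
import torch

x = torch.randn(512, 1024, device="cuda", dtype=torch.bfloat16)
wq = torch.randn(1024, 1024, device="cuda", dtype=torch.bfloat16)
wk = torch.randn(1024, 1024, device="cuda", dtype=torch.bfloat16)
wv = torch.randn(1024, 1024, device="cuda", dtype=torch.bfloat16)

out = tiled_matmul([[x]], [[wq, wk, wv]])
q, k, v = out[0]
# q, k and v should match x @ wq, x @ wk and x @ wv respectively.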
If the tensors are already stacked on dimension :code:`dim`, returns the strides of the stacked tensors. Otherwise returns :code:`None`. | def get_stack_strides(
tensors: Sequence[torch.Tensor], dim: int
) -> Optional[Tuple[int, ...]]:
"""
If the tensors are already stacked on dimension :code:`dim`, \
returns the strides of the stacked tensors. \
Otherwise returns :code:`None`.
"""
if len(tensors) <= 1 or dim > tensors[0].ndim:
return None
final_stride = []
for i in range(tensors[0].ndim + 1):
if i == dim:
final_stride.append(
tensors[1].storage_offset() - tensors[0].storage_offset()
)
continue
if i > dim:
i -= 1
final_stride.append(tensors[0].stride(i))
storage_data_ptr: Optional[int] = None
for i, x in enumerate(tensors[1:]):
# Sanity checks
if x.shape != tensors[0].shape:
return None
if x.stride() != tensors[0].stride():
return None
if (
x.storage_offset()
!= tensors[0].storage_offset() + (i + 1) * final_stride[dim]
):
return None
if storage_data_ptr is None:
storage_data_ptr = _get_storage_base(tensors[0])
# Actual storage check
if _get_storage_base(x) != storage_data_ptr:
return None
return tuple(final_stride) |
Does exactly the same as :attr:`torch.unbind` for the forward.
In backward, avoids a :attr:`torch.cat` if the gradients
are already multiple views of the same storage | def unbind(x: torch.Tensor, dim: int) -> Tuple[torch.Tensor, ...]:
"""
Does exactly the same as :attr:`torch.unbind` for the forward.
In backward, avoids a :attr:`torch.cat` if the gradients
are already multiple views of the same storage
"""
return _Unbind.apply(x, dim) |
Does exactly the same as :attr:`torch.stack` if the tensors can be concatenated
without any memory operation. Otherwise returns None. | def stack_or_none(tensors: Sequence[torch.Tensor], dim: int) -> torch.Tensor:
"""
Does exactly the same as :attr:`torch.stack` if the tensors can be concatenated
without any memory operation. Otherwise returns None.
"""
return _StackOrNone.apply(dim, *tensors) |
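# Minimal sketch: tensors produced by `unbind` remain views of one storage, so
# `stack_or_none` can rebuild the stacked tensor without any copy.
import torch

packed = torch.randn(2, 8, 16)
a, b = unbind(packed, dim=0)
restacked = stack_or_none((a, b), dim=0)              # a view equal to `packed`
separate = stack_or_none((a.clone(), b.clone()), dim=0)  # None: distinct storages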
CK kernel throws "Memory access fault by GPU node-2" when B * T >= 2**20, might be some index overflow.
To reproduce, remove this function and run benchmark_mem_eff_attention with ParlAI model shape (256, 4096, 16, 64).
This needs further debugging, for now let's not support such shapes. | def _check_large_shapes(reasons: List[str], inp: Inputs) -> None:
"""CK kernel throws "Memory access fault by GPU node-2" when B * T >= 2**20, might be some index overflow.
To reproduce, remove this function and run benchmark_mem_eff_attention with ParlAI model shape (256, 4096, 16, 64).
This needs further debugging, for now let's not support such shapes.
"""
b_t_limit = 1024**2
q_too_large = inp.query.shape[0] * inp.query.shape[1] >= b_t_limit
k_too_large = inp.key.shape[0] * inp.key.shape[1] >= b_t_limit
v_too_large = inp.value.shape[0] * inp.value.shape[1] >= b_t_limit
if q_too_large or k_too_large or v_too_large:
reasons.append(
"Input is too large: product of first two dimensions of q/k/v must be < 2**20"
) |
Computes the best operator for forward
Raises:
NotImplementedError: if no operator was found
Returns:
AttentionOp: The best operator for the configuration | def _dispatch_fw(inp: Inputs, needs_gradient: bool) -> Type[AttentionFwOpBase]:
"""Computes the best operator for forward
Raises:
NotImplementedError: if no operator was found
Returns:
AttentionOp: The best operator for the configuration
"""
return _run_priority_list(
"memory_efficient_attention_forward",
_dispatch_fw_priority_list(inp, needs_gradient),
inp,
) |
We want to be able to collapse the G/H dimensions together | def _check_strides_for_bmghk(x: torch.Tensor, name: str, reasons: List[str]) -> None:
"""
We want to be able to collapse the G/H dimensions together
"""
if x.ndim == 5:
stride_g, stride_h = x.stride(2), x.stride(3)
if x.shape[2] == 1:
return
if x.shape[3] == 1 or stride_h == 0:
return
if stride_g != stride_h * x.shape[-2]:
reasons.append(
f"GQA is only supported when the G/H dimensions are contiguous\n"
f" {name}.stride: {x.stride()}\n"
f" {name}.shape : {list(x.shape)}"
) |
Implements the memory-efficient attention mechanism following
`"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_.
:Inputs shape:
- Input tensors must be in format ``[B, M, H, K]``, where B is the batch size, M the sequence length, H the number of heads, and K the embedding size per head
- If inputs have dimension 3, it is assumed that the dimensions are ``[B, M, K]`` and ``H=1``
- Inputs can also be of dimension 5 with GQA - see note below
- Inputs can be non-contiguous - we only require the last dimension's stride to be 1
:Equivalent pytorch code:
.. code-block:: python
scale = 1.0 / query.shape[-1] ** 0.5
query = query * scale
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
attn = query @ key.transpose(-2, -1)
if attn_bias is not None:
attn = attn + attn_bias
attn = attn.softmax(-1)
attn = F.dropout(attn, p)
attn = attn @ value
return attn.transpose(1, 2)
:Examples:
.. code-block:: python
import xformers.ops as xops
# Compute regular attention
y = xops.memory_efficient_attention(q, k, v)
# With a dropout of 0.2
y = xops.memory_efficient_attention(q, k, v, p=0.2)
# Causal attention
y = xops.memory_efficient_attention(
q, k, v,
attn_bias=xops.LowerTriangularMask()
)
:Supported hardware:
NVIDIA GPUs with compute capability above 6.0 (P100+), datatype ``f16``, ``bf16`` and ``f32``.
:EXPERIMENTAL: Using with Multi Query Attention (MQA) and Grouped Query Attention (GQA):
MQA/GQA is an experimental feature supported only for the forward pass.
If you have 16 heads in query, and 2 in key/value, you can provide 5-dim tensors
in the ``[B, M, G, H, K]`` format, where ``G`` is the number of head groups (here 2), and
``H`` is the number of heads per group (8 in the example).
Please note that xFormers will not automatically broadcast the inputs, so you will need
to broadcast it manually before calling `memory_efficient_attention`.
:GQA/MQA example:
.. code-block:: python
import torch
import xformers.ops as xops
B, M, K = 3, 32, 128
kwargs = dict(device="cuda", dtype=torch.float16)
q = torch.randn([B, M, 8, K], **kwargs)
k = torch.randn([B, M, 2, K], **kwargs)
v = torch.randn([B, M, 2, K], **kwargs)
out_gqa = xops.memory_efficient_attention(
q.reshape([B, M, 2, 4, K]),
k.reshape([B, M, 2, 1, K]).expand([B, M, 2, 4, K]),
v.reshape([B, M, 2, 1, K]).expand([B, M, 2, 4, K]),
)
Raises:
NotImplementedError: if there is no operator available to compute the MHA
ValueError: if inputs are invalid
:parameter query: Tensor of shape ``[B, Mq, H, K]``
:parameter key: Tensor of shape ``[B, Mkv, H, K]``
:parameter value: Tensor of shape ``[B, Mkv, H, Kv]``
:parameter attn_bias: Bias to apply to the attention matrix - defaults to no masking. For common biases implemented efficiently in xFormers, see :attr:`xformers.ops.fmha.attn_bias.AttentionBias`. This can also be a :attr:`torch.Tensor` for an arbitrary mask (slower).
:parameter p: Dropout probability. Disabled if set to ``0.0``
:parameter scale: Scaling factor for ``Q @ K.transpose()``. If set to ``None``, the default scale (q.shape[-1]**-0.5) will be used.
:parameter op: The operators to use - see :attr:`xformers.ops.AttentionOpBase`. If set to ``None`` (recommended), xFormers will dispatch to the best available operator, depending on the inputs and options.
:return: multi-head attention Tensor with shape ``[B, Mq, H, Kv]`` | def memory_efficient_attention(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None,
p: float = 0.0,
scale: Optional[float] = None,
*,
op: Optional[AttentionOp] = None,
output_dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
"""Implements the memory-efficient attention mechanism following
`"Self-Attention Does Not Need O(n^2) Memory" <http://arxiv.org/abs/2112.05682>`_.
:Inputs shape:
- Input tensors must be in format ``[B, M, H, K]``, where B is the batch size, M \
the sequence length, H the number of heads, and K the embedding size per head
- If inputs have dimension 3, it is assumed that the dimensions are ``[B, M, K]`` and ``H=1``
- Inputs can also be of dimension 5 with GQA - see note below
- Inputs can be non-contiguous - we only require the last dimension's stride to be 1
:Equivalent pytorch code:
.. code-block:: python
scale = 1.0 / query.shape[-1] ** 0.5
query = query * scale
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
attn = query @ key.transpose(-2, -1)
if attn_bias is not None:
attn = attn + attn_bias
attn = attn.softmax(-1)
attn = F.dropout(attn, p)
attn = attn @ value
return attn.transpose(1, 2)
:Examples:
.. code-block:: python
import xformers.ops as xops
# Compute regular attention
y = xops.memory_efficient_attention(q, k, v)
# With a dropout of 0.2
y = xops.memory_efficient_attention(q, k, v, p=0.2)
# Causal attention
y = xops.memory_efficient_attention(
q, k, v,
attn_bias=xops.LowerTriangularMask()
)
:Supported hardware:
NVIDIA GPUs with compute capability above 6.0 (P100+), datatype ``f16``, ``bf16`` and ``f32``.
:EXPERIMENTAL: Using with Multi Query Attention (MQA) and Grouped Query Attention (GQA):
MQA/GQA is an experimental feature supported only for the forward pass.
If you have 16 heads in query, and 2 in key/value, you can provide 5-dim tensors
in the ``[B, M, G, H, K]`` format, where ``G`` is the number of head groups (here 2), and
``H`` is the number of heads per group (8 in the example).
Please note that xFormers will not automatically broadcast the inputs, so you will need
to broadcast it manually before calling `memory_efficient_attention`.
:GQA/MQA example:
.. code-block:: python
import torch
import xformers.ops as xops
B, M, K = 3, 32, 128
kwargs = dict(device="cuda", dtype=torch.float16)
q = torch.randn([B, M, 8, K], **kwargs)
k = torch.randn([B, M, 2, K], **kwargs)
v = torch.randn([B, M, 2, K], **kwargs)
out_gqa = xops.memory_efficient_attention(
q.reshape([B, M, 2, 4, K]),
k.reshape([B, M, 2, 1, K]).expand([B, M, 2, 4, K]),
v.reshape([B, M, 2, 1, K]).expand([B, M, 2, 4, K]),
)
Raises:
NotImplementedError: if there is no operator available to compute the MHA
ValueError: if inputs are invalid
:parameter query: Tensor of shape ``[B, Mq, H, K]``
:parameter key: Tensor of shape ``[B, Mkv, H, K]``
:parameter value: Tensor of shape ``[B, Mkv, H, Kv]``
:parameter attn_bias: Bias to apply to the attention matrix - defaults to no masking. \
For common biases implemented efficiently in xFormers, see :attr:`xformers.ops.fmha.attn_bias.AttentionBias`. \
This can also be a :attr:`torch.Tensor` for an arbitrary mask (slower).
:parameter p: Dropout probability. Disabled if set to ``0.0``
:parameter scale: Scaling factor for ``Q @ K.transpose()``. If set to ``None``, the default \
scale (q.shape[-1]**-0.5) will be used.
:parameter op: The operators to use - see :attr:`xformers.ops.AttentionOpBase`. \
If set to ``None`` (recommended), xFormers \
will dispatch to the best available operator, depending on the inputs \
and options.
:return: multi-head attention Tensor with shape ``[B, Mq, H, Kv]``
"""
return _memory_efficient_attention(
Inputs(
query=query,
key=key,
value=value,
p=p,
attn_bias=attn_bias,
scale=scale,
output_dtype=output_dtype,
),
op=op,
) |
Calculates the forward pass of :attr:`xformers.ops.memory_efficient_attention`. | def memory_efficient_attention_forward(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None,
p: float = 0.0,
scale: Optional[float] = None,
*,
op: Optional[Type[AttentionFwOpBase]] = None,
output_dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
"""
Calculates the forward pass of :attr:`xformers.ops.memory_efficient_attention`.
"""
return _memory_efficient_attention_forward(
Inputs(
query=query,
key=key,
value=value,
p=p,
attn_bias=attn_bias,
scale=scale,
output_dtype=output_dtype,
),
op=op,
) |
Returns a tuple (output, lse), where `lse` can be used to compute the backward pass later.
See :attr:`xformers.ops.memory_efficient_attention` for an explanation of the arguments
See :attr:`xformers.ops.memory_efficient_attention_backward` for running the backward pass | def memory_efficient_attention_forward_requires_grad(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None,
p: float = 0.0,
scale: Optional[float] = None,
*,
op: Optional[Type[AttentionFwOpBase]] = None,
output_dtype: Optional[torch.dtype] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Returns a tuple (output, lse), where `lse` can be used to compute the backward pass later.
See :attr:`xformers.ops.memory_efficient_attention` for an explanation of the arguments
See :attr:`xformers.ops.memory_efficient_attention_backward` for running the backward pass
"""
if p != 0.0:
raise NotImplementedError(
"dropout is not supported on the non-autograd API."
" If you want to use dropout, please call `memory_efficient_attention` directly"
)
out, ctx = _memory_efficient_attention_forward_requires_grad(
Inputs(
query=query,
key=key,
value=value,
p=p,
attn_bias=attn_bias,
scale=scale,
output_dtype=output_dtype,
),
op=op,
)
return out, ctx.lse |
Computes the gradient of the attention.
Returns a tuple (dq, dk, dv)
See :attr:`xformers.ops.memory_efficient_attention` for an explanation of the arguments.
`lse` is the tensor returned by
:attr:`xformers.ops.memory_efficient_attention_forward_requires_grad` | def memory_efficient_attention_backward(
grad: torch.Tensor,
output: torch.Tensor,
lse: torch.Tensor,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None,
p: float = 0.0,
scale: Optional[float] = None,
*,
op: Optional[Type[AttentionBwOpBase]] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Computes the gradient of the attention.
Returns a tuple (dq, dk, dv)
See :attr:`xformers.ops.memory_efficient_attention` for an explanation of the arguments.
`lse` is the tensor returned by
:attr:`xformers.ops.memory_efficient_attention_forward_requires_grad`
"""
if p != 0.0:
raise NotImplementedError(
"dropout is not supported on the non-autograd API."
" If you want to use dropout, please call `memory_efficient_attention` directly"
)
gradients = _memory_efficient_attention_backward(
Context(out=output, lse=lse),
Inputs(
query=query, key=key, value=value, p=p, attn_bias=attn_bias, scale=scale
),
grad,
op=op,
)
return (gradients.dq, gradients.dk, gradients.dv) |
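# Sketch of the non-autograd API (assumes a CUDA device; shapes illustrative):
# keep the LSE from the forward, then compute gradients explicitly.
import torch

B, M, H, K = 2, 128, 8, 64
q, k, v = (torch.randn(B, M, H, K, device="cuda", dtype=torch.float16) for _ in range(3))
grad_out = torch.randn(B, M, H, K, device="cuda", dtype=torch.float16)

out, lse = memory_efficient_attention_forward_requires_grad(q, k, v)
dq, dk, dv = memory_efficient_attention_backward(grad_out, out, lse, q, k, v)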
Warning: grad/ctx.out is potentially in BMK format | def _memory_efficient_attention_backward(
ctx: Context,
inp: Inputs,
grad: torch.Tensor,
op: Optional[Type[AttentionBwOpBase]],
*,
_skip_op_checks: bool = False,
) -> Gradients:
"""Warning: grad/ctx.out is potentially in BMK format"""
inp.validate_inputs()
if grad.ndim != inp.query.ndim or grad.ndim != ctx.out.ndim:
raise ValueError(
"All tensors should be either in BMK (ndim=3) or BMHK (ndim=4) format. \n"
f"grad.shape : {grad.shape} \n"
f"out.shape : {ctx.out.shape} \n"
f"query.shape: {inp.query.shape}"
)
shape_dq, shape_dk, shape_dv = tuple(
x.shape for x in (inp.query, inp.key, inp.value)
)
inp.normalize_bmhk()
# LSE has shape [B, H, M] while query has shape [B, M, H, K]
if (
ctx.lse.ndim != 3
# Dim 0
or (
not isinstance(inp.attn_bias, BlockDiagonalMask)
and ctx.lse.shape[0] != inp.query.shape[0]
)
or (
isinstance(inp.attn_bias, BlockDiagonalMask)
and ctx.lse.shape[0] != inp.attn_bias.q_seqinfo.seqstart.shape[0] - 1
)
# Dim 1
or ctx.lse.shape[1] != inp.query.shape[2]
# Dim 2
or (
not isinstance(inp.attn_bias, BlockDiagonalMask)
and ctx.lse.shape[2] < inp.query.shape[1]
)
):
raise ValueError(
"Input tensors have incompatible shapes."
f"lse.shape : {ctx.lse.shape} \n"
f"query.shape : {inp.query.shape}"
)
grad = bmk2bmhk(grad, 1)
ctx.out = bmk2bmhk(ctx.out, 1)
if op is None:
op = _dispatch_bw(inp)
elif not _skip_op_checks:
_ensure_op_supports_or_raise(
ValueError, "memory_efficient_attention_backward", op, inp
)
grads = op.apply(ctx, inp, grad)
grads.dq = grads.dq.reshape(shape_dq)
grads.dk = grads.dk.reshape(shape_dk)
grads.dv = grads.dv.reshape(shape_dv)
return grads |
Returns a tuple (output, lse), where `output` is the attention and `lse`
is the corresponding log-sum-exp. The outputs of calls to this with the same query
and separate keys and values can be merged with merge_attentions to obtain
the attention of the queries against the disjoint union of the keys and values. | def memory_efficient_attention_partial(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attn_bias: Optional[Union[torch.Tensor, AttentionBias]] = None,
p: float = 0.0,
scale: Optional[float] = None,
*,
op: Optional[Type[AttentionFwOpBase]] = None,
output_dtype: Optional[torch.dtype] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Returns a tuple (output, lse), where `output` is the attention and `lse`
is the corresponding log-sum-exp. The outputs of calls to this with the same query
and separate keys and values can be merged with merge_attentions to obtain
the attention of the queries against the disjoint union of the keys and values.
"""
if p != 0.0:
raise NotImplementedError("dropout is not supported.")
if not isinstance(
attn_bias,
(
type(None),
BlockDiagonalGappyKeysMask,
BlockDiagonalPaddedKeysMask,
PagedBlockDiagonalPaddedKeysMask,
LowerTriangularFromBottomRightMask,
LowerTriangularMask,
),
):
raise ValueError(
f"{type(attn_bias)} is not supported in memory_efficient_attention_partial."
)
out, ctx = _memory_efficient_attention_forward_requires_grad(
Inputs(
query=query,
key=key,
value=value,
p=p,
attn_bias=attn_bias,
scale=scale,
output_dtype=output_dtype,
is_partial=True,
),
op=op,
)
return out, ctx.lse |
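# Sketch of chunked attention (assumes a CUDA device; shapes illustrative):
# attend to two key/value chunks separately, then merge the partial results.
import torch

B, M, Mkv, H, K = 2, 32, 256, 8, 128
q = torch.randn(B, M, H, K, device="cuda", dtype=torch.bfloat16)
k = torch.randn(B, Mkv, H, K, device="cuda", dtype=torch.bfloat16)
v = torch.randn(B, Mkv, H, K, device="cuda", dtype=torch.bfloat16)

out1, lse1 = memory_efficient_attention_partial(q, k[:, :128], v[:, :128])
out2, lse2 = memory_efficient_attention_partial(q, k[:, 128:], v[:, 128:])
attn, _ = merge_attentions([out1, out2], [lse1, lse2], write_lse=False)
# `attn` should match memory_efficient_attention(q, k, v) up to numerics.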
Combine attention output computed on different parts of K/V for the same
query to get attention on the whole K/V. See https://arxiv.org/abs/2402.05099
The result is equal to
Out_full = (Out1 * exp(LSE1) + Out2 * exp(LSE2) + ...) / (exp(LSE1) + exp(LSE2) + ...)
LSE_full = log(exp(LSE1) + exp(LSE2) + ...)
Args:
attn_split: attention outputs for chunks,
either as a list of tensors of shapes [B, M, G, H, Kq] or [B, M, H, Kq]
or as a single tensor of shape [num_chunks, B, M, G, H, Kq]
or [num_chunks, B, M, H, Kq]
lse_split: LSE for chunks,
either as a list of tensors of shapes [B, G, H, M] or [B, H, M]
or as a single tensor of shape [num_chunks, B, G, H, M] or [num_chunks, B, H, M]
write_lse: whether to output LSE
output_dtype: dtype of attn_out
Returns:
attn_out: [B, M, G, H, Kq] or [B, M, H, Kq]
lse_out: [B, G, H, M] or [B, H, M] if write_lse
or None otherwise | def merge_attentions(
attn_split: Union[torch.Tensor, List[torch.Tensor]],
lse_split: Union[torch.Tensor, List[torch.Tensor]],
write_lse: bool = True,
output_dtype: Optional[torch.dtype] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Combine attention output computed on different parts of K/V for the same
query to get attention on the whole K/V. See https://arxiv.org/abs/2402.05099
The result is equal to
Out_full = (Out1 * exp(LSE1) + Out2 * exp(LSE2) + ...) / (exp(LSE1) + exp(LSE2) + ...)
LSE_full = log(exp(LSE1) + exp(LSE2) + ...)
Args:
attn_split: attention outputs for chunks,
either as a list of tensors of shapes [B, M, G, H, Kq] or [B, M, H, Kq]
or as a single tensor of shape [num_chunks, B, M, G, H, Kq]
or [num_chunks, B, M, H, Kq]
lse_split: LSE for chunks,
either as a list of tensors of shapes [B, G, H, M] or [B, H, M]
or as a single tensor of shape [num_chunks, B, G, H, M] or [num_chunks, B, H, M]
write_lse: whether to output LSE
output_dtype: dtype of attn_out
Returns:
attn_out: [B, M, G, H, Kq] or [B, M, H, Kq]
lse_out: [B, G, H, M] or [B, H, M] if write_lse
or None otherwise
"""
attn_is_concat = isinstance(attn_split, torch.Tensor)
lse_is_concat = isinstance(lse_split, torch.Tensor)
concat_path = attn_is_concat and lse_is_concat
if not concat_path:
if attn_is_concat:
attn_split = cast(torch.Tensor, attn_split).unbind(0)
if lse_is_concat:
lse_split = cast(torch.Tensor, lse_split).unbind(0)
if concat_path:
attn_split = cast(torch.Tensor, attn_split)
lse_split = cast(torch.Tensor, lse_split)
if attn_split.ndim != lse_split.ndim + 1:
raise ValueError(
f"Incompatible input shapes: {attn_split.shape=}, {lse_split.shape=}"
)
is_bmhk = attn_split.ndim == 5
if is_bmhk:
attn_split = attn_split.unsqueeze(3)
lse_split = lse_split.unsqueeze(2)
num_chunks, B, M, G, H, Kq = attn_split.shape
num_chunks1, B1, G1, H1, M1 = lse_split.shape
if B != B1 or G != G1 or H != H1 or num_chunks != num_chunks1 or M != M1:
raise ValueError(
f"Incompatible input shapes: {attn_split.shape=} {lse_split.shape=} "
f"{B}/{B1}, {G}/{G1}, {H}/{H1}, {num_chunks}/{num_chunks1}, {M}/{M1}"
)
attn_split = attn_split.permute(1, 3, 4, 0, 2, 5)
lse_split = lse_split.permute(1, 2, 3, 0, 4)
device = attn_split.device
attn_dtype = attn_split.dtype
lse_dtype = lse_split.dtype
merge_func: Any = triton_splitk.merge_attentions
else:
num_chunks = len(attn_split)
if len(lse_split) != num_chunks:
raise ValueError(
f"Incompatible number of LSE and attention chunks: {len(attn_split)=}, {len(lse_split)=}"
)
attn_unsqueezed = []
lse_unsqueezed = []
is_bmhk = False
for i in range(num_chunks):
if attn_split[i].ndim != lse_split[i].ndim + 1:
raise ValueError(
f"Incompatible input shapes for chunk {i}: {attn_split[i].shape=}, {lse_split[i].shape=}"
)
is_bmhk = attn_split[i].ndim == 4
if is_bmhk:
attn_unsqueezed.append(attn_split[i].unsqueeze(2))
lse_unsqueezed.append(lse_split[i].unsqueeze(1))
else:
attn_unsqueezed.append(attn_split[i])
lse_unsqueezed.append(lse_split[i])
attn_split, lse_split = attn_unsqueezed, lse_unsqueezed
B, M, G, H, Kq = attn_split[0].shape
B1, G1, H1, M1 = lse_split[0].shape
if B != B1 or G != G1 or H != H1 or M != M1:
raise ValueError(
f"Incompatible input shapes: {attn_split[0].shape=}, {lse_split[0].shape=} "
f"{B}/{B1}, {G}/{G1}, {H}/{H1}, {M}/{M1}"
)
for i in range(num_chunks):
if attn_split[i].shape != (B, M, G, H, Kq):
raise ValueError(
f"Incompatible input shapes for attention chunk {i}: "
f"{attn_split[i].shape=}, {(B, M, G, H, Kq)=}"
)
if lse_split[i].shape != (B, G, H, M):
raise ValueError(
f"Incompatible input shapes for LSE chunk {i}: "
f"{lse_split[i].shape=}, {(B, G, H, M)=}"
)
attn_split[i] = attn_split[i].permute(0, 2, 3, 1, 4) # to (B, G, H, M, Kq)
device = attn_split[0].device
attn_dtype = attn_split[0].dtype
lse_dtype = lse_split[0].dtype
merge_func = triton_splitk.merge_attentions_varargs
attn_out = torch.empty(
B,
M,
G,
H,
Kq,
device=device,
dtype=output_dtype or attn_dtype,
)
if write_lse:
lse_out = torch.empty(B, G, H, M, device=device, dtype=lse_dtype)
else:
lse_out = None
merge_func(attn_out, lse_out, attn_split, lse_split) # type: ignore
if is_bmhk:
attn_out = attn_out[:, :, 0]
if lse_out is not None:
lse_out = lse_out[:, 0]
return attn_out, lse_out |
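# A plain-PyTorch reference of the merge formula from the docstring, useful as a
# numerical cross-check. Inputs are assumed permuted so that the query axis M is
# the last LSE dim and the last two attention dims are (M, Kq).
import torch

def merge_attentions_reference(attn_chunks, lse_chunks):
    lse_cat = torch.stack(lse_chunks, dim=0)        # [num_chunks, ..., M]
    lse_full = torch.logsumexp(lse_cat, dim=0)      # log(exp(LSE1) + exp(LSE2) + ...)
    weights = torch.exp(lse_cat - lse_full)         # exp(LSE_i) / sum_j exp(LSE_j)
    attn_cat = torch.stack(attn_chunks, dim=0)      # [num_chunks, ..., M, Kq]
    attn_full = (weights.unsqueeze(-1) * attn_cat).sum(dim=0)
    return attn_full, lse_full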
Each letter in this diagram is a whole row of length dim.
INPUT xq xk xv
head_dim ─►
batch qqqqqq kk vv
│ qqqqqq kk vv
▼ qqqqqq kk vv
head_idx: (goes across all heads of all 3 inputs)
▲ ▲ ▲ ▲ ▲ ▲
│ │ │ │ │ │
│ │
0 k_start │v_start │n_total_heads
│ │
│ │
k_start v_start
Output is to out_q (same shape as xq), an xk-shaped part
of cache_k and an xv-shaped part of cache_v | def _rope_padded_kernel(
xq,
xk,
xv,
out_q,
cache_k,
cache_v,
seqstartq,
seqstartk,
seqlenk,
theta,
first_seqpos,
seqpos,
k_start: tl.constexpr,
v_start: tl.constexpr,
n_groups,
dim: tl.constexpr, # dimension of each head
stride_xqM,
stride_xqG,
stride_xqH,
stride_xkM,
stride_xkG,
stride_xkH,
stride_xvM,
stride_xvG,
stride_xvH,
stride_cachekM,
stride_cachekG,
stride_cachekH,
stride_cachevM,
stride_cachevG,
stride_cachevH,
stride_seqstartq,
stride_seqstartk,
stride_seqlenk,
stride_outqM,
stride_outqG,
stride_outqH,
stride_seqpos,
internal_dtype: tl.constexpr,
# If True, seqstartq and seqstartk are not used but rather we
# assume that every batch element has the same number of
# queries (i.e. num_queries := tl.num_programs(1) )
# and the same cache space cache_padding_length.
# Always False when called below.
const_batch_strides: tl.constexpr,
# If const_batch_strides==True, the common cache length for each batch element.
# (Only the first seqlenk[i] elements are actually in use, and only the last
# num_queries of those are actually written to.)
cache_padding_length,
# offset added to all values in seqlenk before using them.
# Always 0 when called below.
seqlenk_shift: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
adjacents: tl.constexpr,
):
"""
Each letter in this diagram is a whole row of length dim.
INPUT xq xk xv
head_dim ─►
batch qqqqqq kk vv
│ qqqqqq kk vv
▼ qqqqqq kk vv
head_idx: (goes across all heads of all 3 inputs)
▲ ▲ ▲ ▲ ▲ ▲
│ │ │ │ │ │
│ │
0 k_start │v_start │n_total_heads
│ │
│ │
k_start v_start
Output is to out_q (same shape as xq), an xk-shaped part
of cache_k and an xv-shaped part of cache_v
"""
query_pos_in_batch_elt = tl.program_id(0)
batch_elt = tl.program_id(1)
group_head_idx = tl.program_id(2)
group_idx = group_head_idx % n_groups
head_idx = group_head_idx // n_groups
if internal_dtype == "f32":
theta = theta.to(tl.float32)
elif internal_dtype == "f64":
theta = theta.to(tl.float64)
if const_batch_strides:
query_pos = query_pos_in_batch_elt + tl.num_programs(1) * batch_elt
end_query_pos = tl.num_programs(1) * (batch_elt + 1)
else:
query_pos = query_pos_in_batch_elt + tl.load(
seqstartq + batch_elt * stride_seqstartq
)
end_query_pos = tl.load(seqstartq + (batch_elt + 1) * stride_seqstartq)
if query_pos >= end_query_pos:
return
is_q = head_idx < k_start
is_v = head_idx >= v_start
xq += query_pos * stride_xqM + head_idx * stride_xqH + group_idx * stride_xqG
out_q += (
query_pos * stride_outqM + head_idx * stride_outqH + group_idx * stride_outqG
)
if const_batch_strides:
cache_start = cache_padding_length * batch_elt
else:
cache_start = tl.load(seqstartk + batch_elt * stride_seqstartk)
end_of_batch_elt_cache = (
cache_start + tl.load(seqlenk + batch_elt * stride_seqlenk) + seqlenk_shift
)
cache_pos = end_of_batch_elt_cache - (end_query_pos - query_pos)
if seqpos is not None:
seq_pos = tl.load(seqpos + query_pos * stride_seqpos)
else:
seq_pos = cache_pos - cache_start
if first_seqpos is not None:
seq_pos += tl.load(first_seqpos + batch_elt * stride_seqpos)
cache_k += (
(head_idx - k_start) * stride_cachekH
+ cache_pos * stride_cachekM
+ group_idx * stride_cachekG
)
xk += (
query_pos * stride_xkM
+ (head_idx - k_start) * stride_xkH
+ group_idx * stride_xkG
)
in_qk = tl.where(is_q, xq, xk)
out_qk = tl.where(is_q, out_q, cache_k)
cache_v += (
(head_idx - v_start) * stride_cachevH
+ cache_pos * stride_cachevM
+ group_idx * stride_cachevG
)
xv += (
query_pos * stride_xvM
+ (head_idx - v_start) * stride_xvH
+ group_idx * stride_xvG
)
out = tl.where(is_v, cache_v, out_qk)
x_in = tl.where(is_v, xv, in_qk)
for offset in range(0, dim // 2, BLOCK_SIZE // 2):
c = tl.arange(0, BLOCK_SIZE // 2)
powers = (offset + c) * 2.0
if adjacents:
cols_re = (offset + c) * 2
cols_im = cols_re + 1
else:
cols_re = offset + c
cols_im = cols_re + dim // 2
mask = cols_im < dim
re_x = tl.load(x_in + cols_re, mask=mask)
im_x = tl.load(x_in + cols_im, mask=mask)
# freqs = seq_pos / (theta ** (powers / dim))
freqs = seq_pos * pow(theta, powers / (-dim))
sines = tl.sin(freqs)
cosines = tl.cos(freqs)
re_out = re_x * cosines - im_x * sines
im_out = im_x * cosines + re_x * sines
re_out_ = tl.where(is_v, re_x, re_out)
im_out_ = tl.where(is_v, im_x, im_out)
if internal_dtype == "f64":
if re_x.dtype == tl.bfloat16:
# triton 2.0.0 crashes if you try to convert
# float64 directly to bfloat16, so make an intermediate step.
re_out_ = re_out_.to(tl.float32)
im_out_ = im_out_.to(tl.float32)
tl.store(out + cols_re, re_out_, mask=mask)
tl.store(out + cols_im, im_out_, mask=mask) |
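# A plain-PyTorch sketch of the rotation applied by the kernel for the
# adjacents=True layout (real/imaginary parts stored in pairs (2i, 2i+1));
# theta and seq_pos mirror the kernel arguments.
import torch

def rope_row_reference(x_row: torch.Tensor, seq_pos: int, theta: float = 10000.0):
    dim = x_row.shape[-1]
    powers = torch.arange(0, dim, 2, dtype=torch.float64)
    freqs = seq_pos * theta ** (-powers / dim)      # seq_pos / theta**(powers/dim)
    cos, sin = freqs.cos(), freqs.sin()
    re_x, im_x = x_row[0::2].double(), x_row[1::2].double()
    out = torch.empty_like(x_row, dtype=torch.float64)
    out[0::2] = re_x * cos - im_x * sin
    out[1::2] = im_x * cos + re_x * sin
    return out.to(x_row.dtype)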
A more compact way to define a triton.Config, so it fits on one line | def gen_config(
block_m: int,
block_n: int,
block_k: int,
stages: int,
warps: int,
split_k: int = 1,
group_m: int = 8,
) -> triton.Config:
"""A more compact way to define a triton.Config, so it fits on one line"""
return triton.Config(
{
"BLOCK_M": block_m,
"BLOCK_N": block_n,
"BLOCK_K": block_k,
"SPLIT_K": split_k,
"GROUP_M": group_m,
},
num_stages=stages,
num_warps=warps,
pre_hook=init_to_zero("C1", "C2", "C3") if split_k > 1 else init_to_zero(),
) |
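# Illustrative only: such configs are typically passed to triton.autotune; the
# block sizes below are made-up values, not tuned choices.
configs = [
    gen_config(block_m=128, block_n=128, block_k=32, stages=4, warps=4),
    gen_config(block_m=64, block_n=64, block_k=64, stages=3, warps=2, split_k=2),
]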
Call into Triton's upstream cost model, with the right args
The upstream function expects arguments to have certain names. Since we
renamed a few of them in our implementation, we rename them back.
At the time of writing (July 2023) the arguments that Triton expects are:
M, N, K, A, B, C, BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages. | def our_estimate_matmul_time(B1, C1, N1, N2, N3, **kwargs):
"""Call into Triton's upstream cost model, with the right args
The upstream function expects arguments to have certain names. Since we
renamed a few of them in our implementation, we rename them back.
At the time of writing (July 2023) the arguments that Triton expects are:
M, N, K, A, B, C, BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages.
"""
return estimate_matmul_time(N=N1 + N2 + N3, B=B1, C=C1, **kwargs) |
A more compact way to define a triton.Config, so it fits on one line | def gen_config(
block_m: int,
block_n: int,
block_k: int,
stages: int,
warps: int,
split_k: int = 1,
group_m: int = 8,
) -> triton.Config:
"""A more compact way to define a triton.Config, so it fits on one line"""
return triton.Config(
{
"BLOCK_M": block_m,
"BLOCK_N": block_n,
"BLOCK_K": block_k,
"SPLIT_K": split_k,
"GROUP_M": group_m,
},
num_stages=stages,
num_warps=warps,
pre_hook=init_to_zero(*[f"C{i+1}{j+1}" for i in range(3) for j in range(3)])
if split_k > 1
else init_to_zero(),
) |
Call into Triton's upstream cost model, with the right args
The upstream function expects arguments to have certain names. Since we
renamed a few of them in our implementation, we rename them back.
At the time of writing (July 2023) the arguments that Triton expects are:
M, N, K, A, B, C, BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages. | def our_estimate_matmul_time(
A11, B11, C11, M1, M2, M3, N1, N2, N3, K1, K2, K3, **kwargs
):
"""Call into Triton's upstream cost model, with the right args
The upstream function expects arguments to have certain names. Since we
renamed a few of them in our implementation, we rename them back.
At the time of writing (July 2023) the arguments that Triton expects are:
M, N, K, A, B, C, BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages.
"""
return estimate_matmul_time(
M=M1 + M2 + M3, N=N1 + N2 + N3, K=K1 + K2 + K3, A=A11, B=B11, C=C11, **kwargs
) |
A pre-configured profiler that will run on the first ~20 steps of the training
It will provide multiple traces that can be exploited later.
Use it in a context manager around your training loop, and call `xformers.profiler.step`
before starting the next iteration.
:Examples:
.. code-block:: python
import torch
import timm.models
import xformers.profiler
dtype = torch.bfloat16
device = "cuda"
model = timm.models.vit_large_patch16_224().to(device).to(dtype)
inp = torch.zeros([64, 3, 224, 224], device=device, dtype=dtype)
optim = torch.optim.Adam(model.parameters())
with xformers.profiler.profile(
output_dir="profile_data",
module=model,
schedule=[
(MemSnapshotsProfiler, 0, 2),
(DetectSlowOpsProfiler, 2, 4),
(NsightProfiler, 4, 6),
(PyTorchProfiler, 6, 20),
]
):
for i in range(20):
model(inp).sum().backward()
optim.step()
optim.zero_grad()
xformers.profiler.step()
# alternatively, use the profiler without context and with ``.start()`` / `.stop()`
# calls.
xprofiler = xformers.profiler.profile(...)
xprofiler.start()
for i in range(20):
model(inp).sum().backward()
optim.step()
optim.zero_grad()
xprofiler.step()
xprofiler.stop() | def profile(
output_dir: str,
module: Optional[nn.Module] = None,
schedule: Sequence[Tuple[Any, int, int]] = DEFAULT_SCHEDULE,
):
"""
A pre-configured profiler that will run on the first ~20 steps of the training
It will provide multiple traces that can be exploited later.
Use it in a context manager around your training loop, and call `xformers.profiler.step`
before starting the next iteration.
:Examples:
.. code-block:: python
import torch
import timm.models
import xformers.profiler
dtype = torch.bfloat16
device = "cuda"
model = timm.models.vit_large_patch16_224().to(device).to(dtype)
inp = torch.zeros([64, 3, 224, 224], device=device, dtype=dtype)
optim = torch.optim.Adam(model.parameters())
with xformers.profiler.profile(
output_dir="profile_data",
module=model,
schedule=[
(MemSnapshotsProfiler, 0, 2),
(DetectSlowOpsProfiler, 2, 4),
(NsightProfiler, 4, 6),
(PyTorchProfiler, 6, 20),
]
):
for i in range(20):
model(inp).sum().backward()
optim.step()
optim.zero_grad()
xformers.profiler.step()
# alternatively, use the profiler without context and with ``.start()`` / `.stop()`
# calls.
xprofiler = xformers.profiler.profile(...)
xprofiler.start()
for i in range(20):
model(inp).sum().backward()
optim.step()
optim.zero_grad()
xprofiler.step()
xprofiler.stop()
"""
return _Profiler(output_dir=output_dir, schedule=schedule, module=module) |
See `xformers.profiler.profile` | def step() -> None:
"""See `xformers.profiler.profile`"""
# Silently return if no profiler is enabled
if _Profiler._CURRENT_PROFILER is None:
return
_Profiler._CURRENT_PROFILER.step() |
Currently only implemented for GPUs | def get_device_limits(device) -> DeviceLimit:
"""Currently only implemented for GPUs"""
if device is not None and device.type == "cuda":
device_sm = torch.cuda.get_device_capability(device)
device_name = torch.cuda.get_device_name(device)
for lim in DEVICE_LIMITS:
if lim.sm == device_sm:
if lim.name in device_name:
return lim
return DeviceLimit() |
Count flops for convolution. Note only multiplication is
counted. Computation for addition and bias is ignored.
Flops for a transposed convolution are calculated as
flops = batch_size * prod(w_shape) * prod(x_shape[2:]).
Args:
x_shape (list(int)): The input shape before convolution.
w_shape (list(int)): The filter shape.
out_shape (list(int)): The output shape after convolution.
transposed (bool): is the convolution transposed
Returns:
int: the number of flops | def conv_flop_count(
x_shape: List[int],
w_shape: List[int],
out_shape: List[int],
transposed: bool = False,
) -> float:
"""
Count flops for convolution. Note only multiplication is
counted. Computation for addition and bias is ignored.
Flops for a transposed convolution are calculated as
flops = batch_size * prod(w_shape) * prod(x_shape[2:]).
Args:
x_shape (list(int)): The input shape before convolution.
w_shape (list(int)): The filter shape.
out_shape (list(int)): The output shape after convolution.
transposed (bool): is the convolution transposed
Returns:
int: the number of flops
"""
batch_size = x_shape[0]
conv_shape = (x_shape if transposed else out_shape)[2:]
flop = batch_size * prod(w_shape) * prod(conv_shape)
return flop |
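# A worked example with made-up shapes: a 7x7 stride-2 convolution of a 224x224
# RGB batch into 64 channels.
x_shape = [8, 3, 224, 224]      # (batch, C_in, H, W)
w_shape = [64, 3, 7, 7]         # (C_out, C_in, kH, kW)
out_shape = [8, 64, 112, 112]
flops = conv_flop_count(x_shape, w_shape, out_shape)
assert flops == 8 * (64 * 3 * 7 * 7) * (112 * 112)   # 944,111,616 multiplies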
Count flops for convolution. | def conv_flop(inputs: List[Any], outputs: List[Any]):
"""
Count flops for convolution.
"""
x, w = inputs[:2]
x_shape, w_shape, out_shape = (get_shape(x), get_shape(w), get_shape(outputs[0]))
transposed = inputs[6]
return conv_flop_count(x_shape, w_shape, out_shape, transposed=transposed) |
Converts dense 2d matrix to a csr sparse matrix. | def _nonzero_mask_to_sparse_csr_indices(mask, device):
"""Converts dense 2d matrix to a csr sparse matrix."""
assert len(mask.shape) == 2
index_dtype = torch.int32
# Calculate the offset of each row.
row_offsets = mask.sum(dim=-1, dtype=index_dtype).cumsum(dim=-1, dtype=index_dtype)
row_offsets = torch.nn.functional.pad(row_offsets, (1, 0))
# Create the row indices and sort them.
row_indices = _diffsort(row_offsets).to(index_dtype)
# Extract the column indices for the nonzero values.
column_indices = torch.where(mask)[1].to(index_dtype).contiguous()
row_indices = row_indices.to(device)
row_offsets = row_offsets.to(device)
column_indices = column_indices.to(device)
return row_indices, row_offsets, column_indices |
Converts dense 2d matrix to a csr sparse matrix. | def _dense_to_sparse(matrix, device):
"""Converts dense 2d matrix to a csr sparse matrix."""
assert len(matrix.shape) == 2
value_dtype = torch.float32
# Extract the nonzero values.
mask = matrix != 0
values = matrix[mask].to(dtype=value_dtype, device=device)
row_indices, row_offsets, column_indices = _nonzero_mask_to_sparse_csr_indices(
mask, device
)
return values, row_indices, row_offsets, column_indices |
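# A small illustration of the CSR pieces produced above, cross-checked against
# torch's own CSR conversion (CPU device chosen for brevity).
import torch

dense = torch.tensor([[1.0, 0.0, 2.0],
                      [0.0, 0.0, 3.0]])
values, row_indices, row_offsets, column_indices = _dense_to_sparse(dense, device="cpu")
ref = dense.to_sparse_csr()
assert torch.equal(row_offsets.long(), ref.crow_indices())     # [0, 2, 3]
assert torch.equal(column_indices.long(), ref.col_indices())   # [0, 2, 2]
assert torch.equal(values, ref.values())                       # [1., 2., 3.]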
Apply dropout on the input tensor.
Optionally add a bias, the computation will be fused. | def dropout(
x: torch.Tensor,
p: float,
bias: Optional[torch.Tensor] = None,
activation: Optional[Activation] = None,
):
"""
Apply dropout on the input tensor.
Optionally add a bias, the computation will be fused.
"""
assert p <= 1.0 and p >= 0.0
if p == 1.0:
return torch.zeros_like(x)
# Micro optim, skip dropout
if p == 0.0:
x = x + bias if bias is not None else x
if activation is not None:
activation_fn = build_activation(activation)
return activation_fn(x)
return x
# The normal triton enabled codepath
activation_index = get_triton_activation_index(activation)
return _dropout.apply(
x,
float(p),
bias,
activation_index,
bias is not None and bias.requires_grad,
) |
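# A hedged usage sketch: with no activation, the fused path computes the same
# thing as the unfused reference below (bias add then dropout), just in one kernel
# and with its own random mask.
import torch
import torch.nn.functional as F

x = torch.randn(16, 512, device="cuda", requires_grad=True)
b = torch.randn(512, device="cuda", requires_grad=True)
y_fused = dropout(x, p=0.1, bias=b)
y_ref = F.dropout(x + b, p=0.1)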
ReLU_ activation function
.. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html | def relu(x):
"""
ReLU_ activation function
.. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html
"""
return tl.where(x >= 0, x, 0.0) |
Squared ReLU activation, as proposed in the Primer_ paper.
.. _Primer: https://arxiv.org/abs/2109.08668 | def squared_relu(x):
"""
Squared ReLU activation, as proposed in the Primer_ paper.
.. _Primer: https://arxiv.org/abs/2109.08668
"""
x_sq = x * x
return tl.where(x > 0.0, x_sq, 0.0) |
Star ReLU activation, as proposed in the "MetaFormer Baselines for Vision"_ paper.
.. _ "MetaFormer Baselines for Vision": https://arxiv.org/pdf/2210.13452.pdf | def star_relu(x):
"""
Star ReLU activation, as proposed in the "MetaFormer Baselines for Vision"_ paper.
.. _ "MetaFormer Baselines for Vision": https://arxiv.org/pdf/2210.13452.pdf
"""
x_sq = x * x
return 0.8944 * tl.where(x > 0.0, x_sq, 0.0) - 0.4472 |
LeakyReLU_ activation
.. _LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html | def leaky_relu(x):
"""
LeakyReLU_ activation
.. _LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html
"""
return tl.where(x >= 0.0, x, 0.01 * x) |
GeLU_ activation - Gaussian error linear unit
.. _GeLU: https://arxiv.org/pdf/1606.08415.pdf | def gelu(x):
"""
GeLU_ activation - Gaussian error linear unit
.. _GeLU: https://arxiv.org/pdf/1606.08415.pdf
"""
return 0.5 * x * (1 + tanh(_kAlpha * (x + 0.044715 * x * x * x))) |
SmeLU_ activation - Smooth ReLU with beta=2.0
.. _SmeLU: https://arxiv.org/pdf/2202.06499.pdf | def smelu(x):
"""
SmeLU_ activation - Smooth ReLU with beta=2.0
.. _SmeLU: https://arxiv.org/pdf/2202.06499.pdf
"""
beta = 2.0
relu = tl.where(x >= beta, x, 0.0)
return tl.where(tl.abs(x) <= beta, (x + beta) * (x + beta) / (4.0 * beta), relu) |
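# A plain-PyTorch reference of SmeLU with beta=2.0, matching the piecewise
# definition above; handy for spot-checking the Triton version.
import torch

def smelu_reference(x: torch.Tensor, beta: float = 2.0) -> torch.Tensor:
    quad = (x + beta) ** 2 / (4.0 * beta)                 # smooth region, |x| <= beta
    out = torch.where(x >= beta, x, torch.zeros_like(x))  # ReLU-like tails
    return torch.where(x.abs() <= beta, quad, out)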
Apply dropout on an input tensor
Y : Output (M, N)
X : Input (M, N)
BIAS (N,)
SEEDS (M,)
p : dropout probability | def k_dropout_fw(
Y, X, BIAS, SEEDS,
stride,
M, N,
p: tl.constexpr,
is_fp16: tl.constexpr, # autotune
ACTIVATION: tl.constexpr,
# Meta-parameters
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
SIZE_RAND_BLOCK: tl.constexpr,
USE_BIAS: tl.constexpr,
):
"""
Apply dropout on an input tensor
Y : Output (M, N)
X : Input (M, N)
BIAS (N,)
SEEDS (M,)
p : dropout probability
"""
# fmt: on
row_id = tl.program_id(axis=0)
rows = row_id * BLOCK_M + tl.arange(0, BLOCK_M)
col_id = tl.program_id(axis=1)
cols = col_id * BLOCK_N + tl.arange(0, BLOCK_N)
# pointers starting point
x_ptrs = X + rows[:, None] * stride + cols[None, :]
y_ptrs = Y + rows[:, None] * stride + cols[None, :]
# good to go, start the layer computations
col_mask = cols[None, :] < N
p_scale = 1. / (1. - p)
if USE_BIAS:
b_ptrs = BIAS + cols[None, :]
bias = tl.load(b_ptrs, mask=cols[None, :] < N, other=0.)
else:
bias = x_ptrs # will not be used
block_mask = (rows[:, None] < M) & col_mask
x = tl.load(x_ptrs, mask=block_mask, other=0.0)
# optionally apply a fused bias
if USE_BIAS:
x += bias
# optional: fused activation (while the data is in shared memory)
if ACTIVATION == 1:
x = relu(x)
elif ACTIVATION == 2:
x = leaky_relu(x)
elif ACTIVATION == 3:
x = gelu(x)
elif ACTIVATION == 4:
x = squared_relu(x)
elif ACTIVATION == 5:
x = smelu(x)
# get the random keep mask
rand_offsets = tl.arange(0, SIZE_RAND_BLOCK)
seed_int = tl.load(SEEDS + col_id)
r = tl.rand(seed_int, rand_offsets)
keep_mask = r > p
# prune and normalize in one go
keep = tl.view(keep_mask, x.shape)
output = tl.where(keep, (x * p_scale).to(x.dtype), 0.)
tl.store(y_ptrs, output, mask=block_mask) |
Apply dropout on an input tensor
GRAD_OUT (M, N)
GRAD_BIAS (N,)
GRAD_IN (M, N)
BIAS (N,)
SEEDS (N,)
p : dropout probability | def k_dropout_bw(
GRAD_IN, GRAD_BIAS, GRAD_OUT,
INPUTS, BIAS, SEEDS,
stride_grad, stride_inputs,
M, N,
p: tl.constexpr,
is_fp16: tl.constexpr, # autotune
ACTIVATION: tl.constexpr,
# Meta-parameters
BLOCK_M: tl.constexpr, # heuristics
BLOCK_N: tl.constexpr,
SIZE_RAND_BLOCK: tl.constexpr,
TRAINABLE_BIAS: tl.constexpr,
USE_BIAS: tl.constexpr,
):
"""
Apply dropout on an input tensor
GRAD_OUT (M, N)
GRAD_BIAS (N,)
GRAD_IN (M, N)
BIAS (N,)
SEEDS (N,)
p : dropout probability
"""
# fmt: on
row_id = tl.program_id(axis=0)
rows = row_id * BLOCK_M + tl.arange(0, BLOCK_M)
col_id = tl.program_id(axis=1)
cols = col_id * BLOCK_N + tl.arange(0, BLOCK_N)
# pointers starting point
grad_out_ptrs = GRAD_OUT + rows[:, None] * stride_grad + cols[None, :]
grad_in_ptrs = GRAD_IN + rows[:, None] * stride_grad + cols[None, :]
input_ptrs = INPUTS + rows[:, None] * stride_inputs + cols[None, :]
# now go over the tiles
grad_bias = tl.zeros((BLOCK_N,), dtype=tl.float32)
col_mask = cols[None, :] < N
p_scale = 1. / (1. - p)
if USE_BIAS:
b_ptrs = BIAS + cols[None, :]
bias = tl.load(b_ptrs, mask=col_mask, other=0.)
block_mask = (rows[:, None] < M) & col_mask
grad_out = tl.load(grad_out_ptrs, mask=block_mask, other=0.)
# optional: fused activation (while the data is in shared memory)
if ACTIVATION:
inputs = tl.load(input_ptrs, mask=block_mask, other=0.)
# optionally apply a fused bias
if USE_BIAS:
inputs += bias
if ACTIVATION == 1:
act_grad = relu_grad(inputs)
elif ACTIVATION == 2:
act_grad = leaky_relu_grad(inputs)
elif ACTIVATION == 3:
act_grad = gelu_grad(inputs)
elif ACTIVATION == 4:
act_grad = squared_relu_grad(inputs)
elif ACTIVATION == 5:
act_grad = smelu_grad(inputs)
grad_out *= act_grad
# randomly prune (and scale) the resulting buffer, possibly a no-op
# note that even if we did not save the mask from the FW pass, it is generated
# from the same seeds, so the same drop mask is applied here
rand_offsets = tl.arange(0, SIZE_RAND_BLOCK)
seed_int = tl.load(SEEDS + col_id)
r = tl.rand(seed_int, rand_offsets)
r = tl.view(r, grad_out.shape)
output = tl.where(r > p, (grad_out * p_scale).to(grad_out.dtype), 0.)
# write-back
tl.store(grad_in_ptrs, output, mask=block_mask)
# optionally accumulate the bias gradient
if TRAINABLE_BIAS:
grad_bias += tl.sum(output, axis=0)
if TRAINABLE_BIAS:
grad_bias_ptr = GRAD_BIAS + row_id * N + cols
tl.store(grad_bias_ptr, grad_bias, mask=cols < N) |
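# A small demonstration of the seed trick noted above: the backward pass can
# regenerate the forward mask from the saved per-tile seeds instead of storing it
# (torch.Generator stands in for tl.rand here).
import torch

def mask_from_seed(seed: int, shape, p: float) -> torch.Tensor:
    g = torch.Generator().manual_seed(seed)
    return torch.rand(shape, generator=g) > p

assert torch.equal(mask_from_seed(1234, (4, 8), 0.2), mask_from_seed(1234, (4, 8), 0.2))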
Go over all the activation inputs, compute the corresponding gradient | def kernel_bw(
# Pointers to matrices
GRAD_ACT, GRAD_OUT, ACT_INPUTS,
# Matrix dimensions
N,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_gom, stride_aim,
# Meta-parameters
BLOCK_N: tl.constexpr,
EVEN_N: tl.constexpr,
ACTIVATION_GRAD: tl.constexpr,
):
# fmt: on
"""
Go over all the activation inputs, compute the corresponding gradient
"""
# this kernel is relatively simple in terms of scheduling:
# - per row (pid_m)
# - each program a given chunk on the col axis,
# since it's more effective memory and occupancy wise
pid_m, pid_n = tl.program_id(axis=0), tl.program_id(axis=1)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# the memory addresses of elements in the first block of
# A and W can be computed using numpy-style broadcasting
act_input_ptrs = ACT_INPUTS + pid_m * stride_aim + rn
# compute the gradient which is related to this activation
if EVEN_N:
act_in = tl.load(act_input_ptrs)
else:
act_in = tl.load(act_input_ptrs, mask=rn < N, other=0.0)
if ACTIVATION_GRAD == 1:
grad_act = relu_grad(act_in)
elif ACTIVATION_GRAD == 2:
grad_act = leaky_relu_grad(act_in)
elif ACTIVATION_GRAD == 3:
grad_act = gelu_grad(act_in)
elif ACTIVATION_GRAD == 4:
grad_act = squared_relu_grad(act_in)
elif ACTIVATION_GRAD == 5:
grad_act = smelu_grad(act_in)
elif ACTIVATION_GRAD == 6:
grad_act = star_relu_grad(act_in)
else:
grad_act = act_in
# now read the incoming gradient, the backpropagated one is the multiple of both
grad_out_ptrs = GRAD_OUT + pid_m * stride_gom + rn
if EVEN_N:
grad_out = tl.load(grad_out_ptrs)
else:
grad_out = tl.load(grad_out_ptrs, mask=rn < N)
grad_act *= grad_out
# write back result
grad_act_ptrs = GRAD_ACT + pid_m * stride_gom + rn
tl.store(grad_act_ptrs, grad_act, mask=rn < N) |
Compute grad_in = activation^-1(grad_out) @ weight.transpose()
.. note: The weight buffer is transposed on the fly
.. note: Activation gradient needs to be a Triton kernel | def fused_matmul_backward(
grad_out: torch.Tensor,
inputs: torch.Tensor,
act_in: Optional[torch.Tensor],
weight: torch.Tensor,
trainable_weight: bool,
trainable_bias: bool,
activation_grad: int = 0,
):
"""
Compute grad_in = activation^-1(grad_out) @ weight.transpose()
.. note: The weight buffer is transposed on the fly
.. note: Activation gradient needs to be a Triton kernel
"""
# Make sure that we don't have to handle the stride over cols
if not grad_out.is_contiguous():
grad_out = grad_out.contiguous()
grad_out_ = grad_out if grad_out.ndim == 2 else grad_out.flatten(0, -2)
inputs_ = inputs if inputs.ndim == 2 else inputs.flatten(0, -2)
assert grad_out_.shape[1] == weight.shape[0], "Incompatible dimensions in between grad_out and weight"
M, N = grad_out_.shape
N, _ = weight.shape
# Compute the gradient for the activation
if activation_grad > 0:
grad_act = torch.empty_like(grad_out_)
# Some activations do not require their inputs to
# know of their grad, the downstream grad is enough
if act_in is None:
act_in = grad_out_
grid = lambda META: (M, triton.cdiv(N, META["BLOCK_N"])) # noqa
# fmt: off
kernel_bw[grid](
grad_act, grad_out_, act_in, # data ptrs
N, # shapes
grad_act.stride(0), act_in.stride(0), # strides
ACTIVATION_GRAD=activation_grad, # optional fused activation
)
# fmt: on
# Backpropagation going up, the reference gradient is now
# just before the activation
grad_out_ = grad_act
# The following ops can also be handled by pytorch
grad_in = triton.ops.matmul(grad_out_, weight)
grad_weight = grad_out_.transpose(1, 0) @ inputs_ if trainable_weight else None
grad_bias = torch.sum(grad_out_, dim=0) if trainable_bias else None
return grad_in.reshape_as(inputs), grad_weight, grad_bias |
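# The unfused math implemented above, restated with plain PyTorch for a linear
# layer y = act(x @ W.t() + b); act_grad_fn stands in for the fused
# activation-gradient kernel.
import torch

def fused_matmul_backward_reference(grad_out, x, act_in, weight, act_grad_fn=None):
    if act_grad_fn is not None:
        grad_out = grad_out * act_grad_fn(act_in)   # chain rule through the activation
    grad_in = grad_out @ weight                     # (M, N) @ (N, K) -> (M, K)
    grad_weight = grad_out.t() @ x                  # (N, M) @ (M, K) -> (N, K)
    grad_bias = grad_out.sum(dim=0)                 # (N,)
    return grad_in, grad_weight, grad_bias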
Kernel for computing Out = activation(A x W + C)
- Input has shape (M, K)
- Weight has shape (K, N)
- Bias has shape (N,)
- Output has shape (M, N)
- ActInputs (optional) has shape (M, N)
'ActInputs' optionally saves the A x W + C intermediate for backward computations
This kernel will consolidate over K | def kernel_fma(
# Pointers to matrices
OUT, ACT_INPUTS, INPUT, WEIGHT, bias,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_om, stride_im,
stride_wn,
# Meta-parameters
BLOCK_M: tl.constexpr, GROUP_M: tl.constexpr,
BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
EVEN_N: tl.constexpr,
BIAS: tl.constexpr,
SAVE_ACT_INPUTS: tl.constexpr,
ACTIVATION: tl.constexpr,
is_fp16: tl.constexpr, # autotune
):
# fmt: on
"""
Kernel for computing Out = activation(A x W + C)
- Input has shape (M, K)
- Weight has shape (K, N)
- Bias has shape (N,)
- Output has shape (M, N)
- ActInputs (optional) has shape (M, N)
'ActInputs' optionally saves the A x W + C intermediate for backward computations
This kernel will consolidate over K
"""
# programs are grouped together to improve L2 hit rate
# the logic is that we'll consolidate over K. If the programs were not grouped,
# then multiple cols/rows in the result would end up pulling in the same row and lines
# from the inputs. By grouping the computation we ensure some data reuse, which the hardware
# covers via the L2 cache
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_M) # number of program ids along the M axis
num_pid_n = tl.cdiv(N, BLOCK_N) # number of programs ids along the N axis
num_pid_in_group = GROUP_M * num_pid_n # number of programs in group
group_id = pid // num_pid_in_group # id of the group this program is in
first_pid_m = group_id * GROUP_M # row-id of the first program in the group
GROUP_M = min(
num_pid_m - first_pid_m, GROUP_M
) # if `num_pid_m` isn't divisible by `GROUP_M`, the last group is smaller
# *within groups*, programs are ordered in a column-major order
# row-id /col-id of the program in the *launch grid*
pid_m = first_pid_m + (pid % GROUP_M)
pid_n = (pid % num_pid_in_group) // GROUP_M
# now compute the block that each program will go through
# rm (resp. rn) denotes a range of indices
# for rows (resp. col) of C
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
rk = tl.arange(0, BLOCK_K)
# the memory addresses of elements can follow numpy broadcasting
input_ptrs = INPUT + rm[:, None] * stride_im
weight_ptrs = WEIGHT + rn[None, :] * stride_wn
# initialize and iteratively update accumulator
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
if BIAS:
if EVEN_N:
bias = tl.load(bias + rn).to(tl.float32)
else:
bias = tl.load(bias + rn, mask=rn < N, other=0.0).to(tl.float32)
acc += bias[None, :]
# block level matrix multiplication.
# We fetch a block memory block from both inputs, matmul and accumulate, then repeat
mask_rn = rn < N
mask_rm = rm < M
for i in range(0, K, BLOCK_K):
rk = tl.arange(0, BLOCK_K) + i
a = tl.load(input_ptrs + rk[None, :], mask=((rk[None, :] < K) & mask_rm[:, None]), other=0.0)
w = tl.load(weight_ptrs + rk[:, None], mask=((rk[:, None] < K) & mask_rn[None, :]), other=0.0)
acc += tl.dot(a, w)
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
# optional: save the activation inputs
if SAVE_ACT_INPUTS:
act_in_ptrs = ACT_INPUTS + rm[:, None] * stride_om + rn[None, :]
tl.store(act_in_ptrs, acc, mask=mask_rm[:, None] & mask_rn[None, :])
# optional: fused activation (while the data is in shared memory)
if ACTIVATION == 1:
acc = relu(acc)
elif ACTIVATION == 2:
acc = leaky_relu(acc)
elif ACTIVATION == 3:
acc = gelu(acc)
elif ACTIVATION == 4:
acc = squared_relu(acc)
elif ACTIVATION == 5:
acc = smelu(acc)
elif ACTIVATION == 6:
acc = star_relu(acc)
# write back result
out_ptrs = OUT + rm[:, None] * stride_om + rn[None, :]
tl.store(out_ptrs, acc, mask=mask_rm[:, None] & mask_rn[None, :]) |
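# A tiny host-side illustration of the grouped program ordering used above for L2
# reuse: programs sweep GROUP_M rows of output tiles at a time, column-major
# within each group (same arithmetic as the kernel, in plain Python).
def swizzle(pid, num_pid_m, num_pid_n, group_m):
    num_pid_in_group = group_m * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * group_m
    group_size = min(num_pid_m - first_pid_m, group_m)
    pid_m = first_pid_m + (pid % group_size)
    pid_n = (pid % num_pid_in_group) // group_size
    return pid_m, pid_n

# e.g. [swizzle(p, 4, 4, 2) for p in range(8)] walks a 2-row band of tiles first.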
Compute e = activation(x @ weight + bias).
This wrapper kicks the `kernel_fma` Triton kernel | def fused_matmul(
x: torch.Tensor,
weight: torch.Tensor,
bias: Optional[torch.Tensor],
activation=0,
save_act_inputs: bool = False
):
"""
Compute e = activation(x @ weight + bias).
This wrapper kicks the `kernel_fma` Triton kernel
"""
if not x.is_contiguous():
x = x.contiguous()
x_ = x if x.ndim == 2 else x.flatten(0, -2)
assert (
x_.shape[1] == weight.shape[1]
), f"Incompatible dimensions in between inputs and weight, {x_.shape} - {weight.shape}"
assert bias is None or bias.is_contiguous()
assert (
bias is None or bias.shape[0] == weight.shape[0]
), "Incompatible dimensions in between weight and bias"
assert weight.is_contiguous()
M, K = x_.shape
N, K = weight.shape
outputs = torch.empty((M, N), device=x.device, dtype=x.dtype)
act_inputs = torch.empty_like(outputs) if save_act_inputs else x # will not be used in that case
# 1D launch kernel where each block gets its own program.
grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) # noqa
# fmt: off
kernel_fma[grid](
outputs, act_inputs, x_, weight, # data ptrs
bias if bias is not None else x, # auto skip bias if not present
M, N, K, # shapes
outputs.stride(0), x_.stride(0), # strides
weight.stride(0),
ACTIVATION=activation, # optional fused activation
BIAS=bias is not None, # optional fused bias
GROUP_M=8, # speed optimization: group the programs
SAVE_ACT_INPUTS=save_act_inputs,
is_fp16=x_.dtype == torch.float16
)
# fmt: on
outputs = outputs if x.ndim == 2 else outputs.reshape(*x.shape[:-1], N)
return outputs, act_inputs if save_act_inputs else None |
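# A hedged equivalence sketch: with the (N, K) weight layout unpacked above, the
# fused kernel computes the same result as a plain linear layer (up to fp16
# accumulation differences).
import torch

x = torch.randn(64, 256, device="cuda", dtype=torch.float16)
w = torch.randn(512, 256, device="cuda", dtype=torch.float16)   # (N, K)
b = torch.randn(512, device="cuda", dtype=torch.float16)
y_fused, _ = fused_matmul(x, w, b, activation=0)
y_ref = x @ w.t() + b   # same as torch.nn.functional.linear(x, w, b)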
Fused layernorm kernel over a 3d tensor.
The layer norm is applied over the last dimension.
Compute
y = (x - E(x))/(sqrt(var(x) + epsilon)) * gamma + beta | def layer_norm_fw(X, Y, W, B, M, V, stride, N, eps, affine: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
# fmt: on
"""
Fused layernorm kernel over a 3d tensor.
The layer norm is applied over the last dimension.
Compute
y = (x - E(x))/(sqrt(var(x) + epsilon)) * gamma + beta
"""
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N
# Move to this row
x_ptrs = X + row * stride + cols
x = tl.load(x_ptrs, mask=mask, other=0.0).to(tl.float32)
# Compute mean and variance
mean = tl.sum(x, axis=0) / N
x_zm = tl.where(mask, x - mean, 0.0)
tl.store(M + row, mean)
x_var = tl.sum(x_zm * x_zm, axis=0) / N
rstd = 1.0 / tl.sqrt(x_var + eps)
# Normalize, optionally affine
y = x_zm * rstd
tl.store(V + row, rstd)
mask = cols < N
if affine:
w = tl.load(W + cols, mask=mask, other=1.0)
b = tl.load(B + cols, mask=mask, other=0.0)
y = y * w + b
y_ptrs = Y + row * stride + cols
tl.store(y_ptrs, y, mask=mask) |
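# The per-row math above, restated with plain PyTorch; the kernel additionally
# stores the mean (M) and reciprocal standard deviation (V) for the backward pass.
import torch

def layer_norm_row_reference(x_row, weight=None, bias=None, eps=1e-5):
    x = x_row.float()
    mean = x.mean()
    var = (x - mean).pow(2).mean()
    rstd = 1.0 / torch.sqrt(var + eps)
    y = (x - mean) * rstd
    if weight is not None:
        y = y * weight + bias
    return y.to(x_row.dtype), mean, rstd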
Fused softmax kernel over a 3d tensor.
The softmax is applied over the last dimension, meaning that this is equivalent to torch.softmax(tensor, dim=-1)
Note that if the last dimension is large, say 128K elements, the kernel compile time can shoot up to many minutes when
the kernel is run for the first time. | def _softmax(
Y, X, M,
stride_ym, stride_yn,
stride_xm, stride_xn,
stride_mn,
K,
# Meta-params
depth: tl.constexpr,
causal: tl.constexpr,
use_mask: tl.constexpr,
log: tl.constexpr,
):
# fmt: on
"""
Fused softmax kernel over a 3d tensor.
The softmax is applied over the last dimension, meaning that this is equivalent to torch.softmax(tensor, dim=-1)
Note that if the last dimension is large, say 128K elements, the kernel compile time can shoot up to many minutes when
the kernel is run for the first time.
"""
m = tl.program_id(0)
n = tl.program_id(1)
# col indices
k = tl.arange(0, depth)
# the memory address of all the elements that we want to load can be computed as follows
x_ptrs = X + m * stride_xm + n * stride_xn + k
# load input data; pad out-of-bounds elements with 0
io_mask = k < K
# Causal - 1: skip on the loads directly
if causal:
io_mask = io_mask & (k <= n)
x = tl.load(x_ptrs, mask=io_mask, other=float("-inf")).to(tl.float32)
# Causal - 2: enforce correctness over a couple of misloaded values
if causal:
off = float("-inf")
off = off.to(x.dtype) # type: ignore
x = tl.where(k > n, off, x)
if use_mask:
mask_ptrs = M + n * stride_mn + k
add_mask = tl.load(mask_ptrs, io_mask, other=float("-inf")).to(tl.float32)
x += add_mask
# compute numerically-stable softmax
z = x - tl.max(x, axis=0)
num = tl.exp(z)
denom = tl.sum(num, axis=0)
if log:
y = z - tl.log(denom)
else:
y = num / denom
# write back to Y.
# we only write once, hence the "fused" softmax naming
y_ptrs = Y + m * stride_ym + n * stride_yn + k
# technically we could write only the lower triangular matrix in the causal case
# but this is deemed too error prone
tl.store(y_ptrs, y, mask=k < K) |
Compute the softmax gradients.
..Note: Not autotuning for now because this would lead to broken accumulated gradients | def _softmax_backward(
GradIn, GradOut, Out,
stride_bm, stride_bn,
stride_gm, stride_gn,
stride_om, stride_on,
K,
# meta-params
depth: tl.constexpr,
causal: tl.constexpr,
log: tl.constexpr,
):
# fmt: on
"""
Compute the softmax gradients.
..Note: Not autotuning for now because this would lead to broken accumulated gradients
"""
m = tl.program_id(0)
n = tl.program_id(1)
# col indices
k = tl.arange(0, depth)
# the memory address of all the elements that we want to load can be computed as follows
grad_out_ptrs = GradOut + m * stride_gm + n * stride_gn + k
out_ptrs = Out + m * stride_om + n * stride_on + k
# load input data; pad out-of-bounds elements with 0
io_mask = k < K
# Causal - 1: skip on the loads directly
if causal:
io_mask = io_mask & (k <= n)
g = tl.load(grad_out_ptrs, mask=io_mask, other=float(0)).to(tl.float32)
o = tl.load(out_ptrs, mask=io_mask, other=float(0)).to(tl.float32)
# Causal - 2: enforce correctness over a couple of misloaded values
if causal:
zero = float(0)
zero = zero.to(g.dtype) # type: ignore
g = tl.where(k > n, zero, g)
o = tl.where(k > n, zero, o)
if log:
s = tl.sum(g, 0)
grad_in = g - tl.exp(o) * s
else:
# Step 1: Compute the intermediate sum used for the gradient
s = tl.sum(g * o, 0)
# Step 2: Compute the gradients
grad_in = o * (g - s)
# write back to the input gradients
# technically we could write only the lower triangular matrix in the causal case
# but this is deemed too error prone
grad_in_ptrs = GradIn + m * stride_bm + n * stride_bn + k
tl.store(grad_in_ptrs, grad_in, mask=k < K) |
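# The two gradient formulas above, restated for a single row with plain PyTorch
# (g = upstream gradient, o = saved forward output).
import torch

def softmax_bw_row_reference(g: torch.Tensor, o: torch.Tensor, log: bool) -> torch.Tensor:
    if log:
        return g - torch.exp(o) * g.sum()      # o = log_softmax(x)
    return o * (g - (g * o).sum())             # o = softmax(x)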
Applies the Softmax function to a 3-dimensional input Tensor,
rescaling it so that the elements of the output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
.. warning: softmax is computed on the last dimension of the input tensor.
Args:
x: input tensor.
mask: optional mask, its application will be fused to the softmax computation if triton is used
causal: optional performance optimization, if triton is used and the attention is causal
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1] and sum to 1 | def softmax(
x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False
) -> torch.Tensor:
r"""Applies the Softmax function to a 3-dimensional input Tensor,
rescaling it so that the elements of the output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
.. warning: softmax is computed on the last dimension of the input tensor.
Args:
x: input tensor.
mask: optional mask, its application will be fused to the softmax computation if triton is used
causal: optional performance optimization, if triton is used and the attention is causal
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1] and sum to 1
"""
return _softmax_dispatch(x, log=False, mask=mask, causal=causal) |
Applies the :math:`\log(\text{Softmax}(x))` function to a 3-dimensional
input Tensor. The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Args:
x: input tensor.
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0) | def log_softmax(
x: torch.Tensor, mask: Optional[torch.Tensor] = None, causal: bool = False
) -> torch.Tensor:
r"""Applies the :math:`\log(\text{Softmax}(x))` function to a 3-dimensional
input Tensor. The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Args:
x: input tensor.
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [-inf, 0)
"""
return _softmax_dispatch(x, log=True, mask=mask, causal=causal) |
Specializes a triton kernel with variable number of inputs
to a specific number of inputs `N`.
NOTE: Because it's quite costly to call `triton.jit`,
we cache the returned value with `lru_cache` | def unroll_varargs(kernel, N: int):
"""
Specializes a triton kernel with variable number of inputs
to a specific number of inputs `N`.
NOTE: Because it's quite costly to call `triton.jit`,
we cache the returned value with `lru_cache`
"""
global _FILENAME_TO_SRC, _getlines_orig
k = triton.JITFunction(kernel.fn)
parsed = ast.parse(k.src)
nodeVisitor = _VisitorUnrollKernel(N=N)
parsed = nodeVisitor.visit(parsed)
parsed = ast.fix_missing_locations(parsed)
# NOTE: `ast.unparse` requires python 3.9+
if (sys.version_info.major, sys.version_info.minor) <= (3, 8):
raise RuntimeError("Error: This functionality requires python 3.9 or above")
new_src = ast.unparse(parsed) # type: ignore
# Now we want to `eval` the function, but we need all this
# boilerplate code to make sure triton can run `inspect.getsource`
fn_filename = f"<unroll_varargs-{kernel.fn.__name__}-{N}>"
# Create function given source
code = compile(new_src, fn_filename, "exec")
_locals: Dict[str, Any] = {}
exec(code, kernel.fn.__globals__, _locals)
assert len(_locals) == 1, len(_locals)
fn = next(iter(_locals.values()))
# Patch `getlines` only the first time
if not _FILENAME_TO_SRC:
_getlines_orig = linecache.getlines
linecache.getlines = _monkey_patched_getlines
_FILENAME_TO_SRC[fn_filename] = new_src
jitted_fn = triton.jit(fn)
jitted_fn.src = new_src
return jitted_fn |
Remove the lexer/parser modules that are dynamically created. | def clean_tables():
"""Remove the lexer/parser modules that are dynamically created."""
for f in TABLES:
if os.path.isfile(f):
os.remove(f)
print("Removed " + f) |