method_name (string, 3–45 chars) | method_body (string, 9–6.25k chars) | full_code (string, 35–7.02k chars) | docstring (string, 18–4.7k chars, nullable ⌀)
---|---|---|---|
start_background_loop | """Start the background loop."""
if self.is_running:
raise RuntimeError('Background loop is already running.')
self._request_tracker.init_event()
self._background_loop_unshielded = asyncio.get_event_loop().create_task(
    self.run_engine_loop())
self._background_loop_unshielded.add_done_callback(partial(
_raise_exception_on_finish, request_tracker=self._request_tracker))
self.background_loop = asyncio.shield(self._background_loop_unshielded) | def start_background_loop(self) ->None:
"""Start the background loop."""
if self.is_running:
raise RuntimeError('Background loop is already running.')
self._request_tracker.init_event()
self._background_loop_unshielded = asyncio.get_event_loop().create_task(
self.run_engine_loop())
self._background_loop_unshielded.add_done_callback(partial(
_raise_exception_on_finish, request_tracker=self._request_tracker))
self.background_loop = asyncio.shield(self._background_loop_unshielded) | Start the background loop. |
forward | x = self.norm_1(hidden_states)
x = self.attn(position_ids=position_ids, hidden_states=x, kv_cache=kv_cache,
input_metadata=input_metadata)
hidden_states = hidden_states + x
x = self.norm_2(hidden_states)
x = self.ffn(x)
hidden_states = hidden_states + x
return hidden_states | def forward(self, position_ids: torch.Tensor, hidden_states: torch.Tensor,
kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor:
x = self.norm_1(hidden_states)
    x = self.attn(position_ids=position_ids, hidden_states=x,
        kv_cache=kv_cache, input_metadata=input_metadata)
hidden_states = hidden_states + x
x = self.norm_2(hidden_states)
x = self.ffn(x)
hidden_states = hidden_states + x
return hidden_states | null |
__eq__ | if not isinstance(other, SequenceGroupOutput):
raise NotImplementedError()
return self.samples == other.samples and self.prompt_logprobs == other.prompt_logprobs | def __eq__(self, other: object) ->bool:
if not isinstance(other, SequenceGroupOutput):
raise NotImplementedError()
    return (self.samples == other.samples and
        self.prompt_logprobs == other.prompt_logprobs) | null |
test_sampler_all_greedy | set_random_seed(seed)
batch_size = random.randint(1, 256)
input_tensor, fake_logits, sampler, model_runner = _prepare_test(batch_size)
seq_group_metadata_list = []
prompt_lens = []
for i in range(batch_size):
    seq_group_metadata_list.append(SequenceGroupMetadata(
        request_id=f'test_{i}', is_prompt=True,
        seq_data={0: SequenceData([1, 2, 3])},
        sampling_params=SamplingParams(temperature=0), block_tables={0: [1]}))
prompt_lens.append(seq_group_metadata_list[-1].seq_data[0].get_len())
sampling_metadata = model_runner._prepare_sample(seq_group_metadata_list,
prompt_lens)
sampler_output = sampler(embedding=None, hidden_states=input_tensor,
sampling_metadata=sampling_metadata)
expected = torch.argmax(fake_logits, dim=-1)
for i, sequence_output in enumerate(sampler_output):
for nth_output in sequence_output.samples:
assert nth_output.output_token == expected[i].item() | @pytest.mark.parametrize('seed', RANDOM_SEEDS)
def test_sampler_all_greedy(seed: int):
set_random_seed(seed)
batch_size = random.randint(1, 256)
    input_tensor, fake_logits, sampler, model_runner = _prepare_test(batch_size)
seq_group_metadata_list = []
prompt_lens = []
for i in range(batch_size):
        seq_group_metadata_list.append(SequenceGroupMetadata(
            request_id=f'test_{i}', is_prompt=True,
            seq_data={0: SequenceData([1, 2, 3])},
            sampling_params=SamplingParams(temperature=0),
            block_tables={0: [1]}))
prompt_lens.append(seq_group_metadata_list[-1].seq_data[0].get_len())
sampling_metadata = model_runner._prepare_sample(seq_group_metadata_list,
prompt_lens)
sampler_output = sampler(embedding=None, hidden_states=input_tensor,
sampling_metadata=sampling_metadata)
expected = torch.argmax(fake_logits, dim=-1)
for i, sequence_output in enumerate(sampler_output):
for nth_output in sequence_output.samples:
assert nth_output.output_token == expected[i].item() | null |
get_pipeline_model_parallel_world_size | """Return world size for the pipeline model parallel group."""
return torch.distributed.get_world_size(
    group=get_pipeline_model_parallel_group()) | def get_pipeline_model_parallel_world_size():
"""Return world size for the pipeline model parallel group."""
    return torch.distributed.get_world_size(
        group=get_pipeline_model_parallel_group()) | Return world size for the pipeline model parallel group. |
_yarn_find_correction_dim | return dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi)) / (2 * math.log(base)) | def _yarn_find_correction_dim(num_rotations: int, dim: int, base: float=10000,
        max_position_embeddings: int=2048) ->float:
    return dim * math.log(max_position_embeddings /
        (num_rotations * 2 * math.pi)) / (2 * math.log(base)) | null |
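The `_yarn_find_correction_dim` helper above is a single closed-form expression. Written out, with $r$ = `num_rotations`, $d$ = `dim`, $b$ = `base`, and $L$ = `max_position_embeddings`:

$$
d_{\text{corr}}(r) = \frac{d \, \ln\!\bigl(L / (2\pi r)\bigr)}{2 \ln b},
$$

which is the (fractional) rotary dimension whose wavelength $2\pi b^{2i/d}$ completes roughly $r$ rotations over a context of length $L$; this is the quantity YaRN-style RoPE scaling uses to pick its correction range.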
forward | hidden_states = self.transformer(input_ids, positions, kv_caches,
input_metadata)
return hidden_states | def forward(self, input_ids: torch.Tensor, positions: torch.Tensor,
kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor:
hidden_states = self.transformer(input_ids, positions, kv_caches,
input_metadata)
return hidden_states | null |
__init__ | super().__init__()
self.num_heads = num_heads
self.head_size = head_size
self.scale = float(scale)
self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads
self.sliding_window = sliding_window
if alibi_slopes is not None:
alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
self.register_buffer('alibi_slopes', alibi_slopes, persistent=False)
assert self.num_heads % self.num_kv_heads == 0
self.num_queries_per_kv = self.num_heads // self.num_kv_heads
if self.head_size not in _SUPPORTED_HEAD_SIZES:
raise ValueError(
f'head_size ({self.head_size}) is not supported. Supported head sizes: {_SUPPORTED_HEAD_SIZES}.'
    ) | def __init__(self, num_heads: int, head_size: int, scale: float,
        num_kv_heads: Optional[int]=None,
        alibi_slopes: Optional[List[float]]=None,
        sliding_window: Optional[int]=None) ->None:
super().__init__()
self.num_heads = num_heads
self.head_size = head_size
self.scale = float(scale)
self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads
self.sliding_window = sliding_window
if alibi_slopes is not None:
alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
self.register_buffer('alibi_slopes', alibi_slopes, persistent=False)
assert self.num_heads % self.num_kv_heads == 0
self.num_queries_per_kv = self.num_heads // self.num_kv_heads
if self.head_size not in _SUPPORTED_HEAD_SIZES:
raise ValueError(
f'head_size ({self.head_size}) is not supported. Supported head sizes: {_SUPPORTED_HEAD_SIZES}.'
) | null |
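A quick illustration of the grouped-query bookkeeping in this constructor (the numbers below are hypothetical, not taken from the dataset):

```python
# Hypothetical GQA configuration, mirroring the arithmetic in __init__ above.
num_heads, num_kv_heads = 32, 8
assert num_heads % num_kv_heads == 0
num_queries_per_kv = num_heads // num_kv_heads  # 4 query heads share each KV head
```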
num_unfinished_seqs | return len(self.get_unfinished_seqs()) | def num_unfinished_seqs(self) ->int:
return len(self.get_unfinished_seqs()) | null |
ref_single_query_cached_kv_attention | num_query_heads = query.shape[1]
num_kv_heads = value_cache.shape[1]
head_size = value_cache.shape[2]
block_size = value_cache.shape[3]
num_seqs = query.shape[0]
block_tables = block_tables.cpu().tolist()
context_lens = context_lens.cpu().tolist()
for i in range(num_seqs):
q = query[i].unsqueeze(0)
block_table = block_tables[i]
context_len = int(context_lens[i])
keys = []
values = []
for j in range(context_len):
block_number = int(block_table[j // block_size])
block_offset = j % block_size
k = key_cache[block_number, :, :, block_offset, :]
k = k.reshape(num_kv_heads, head_size)
keys.append(k)
v = value_cache[block_number, :, :, block_offset]
values.append(v)
keys = torch.stack(keys, dim=0)
values = torch.stack(values, dim=0)
if num_queries_per_kv > 1:
keys = torch.repeat_interleave(keys, num_queries_per_kv, dim=1)
values = torch.repeat_interleave(values, num_queries_per_kv, dim=1)
alibi_bias = None
if alibi_slopes is not None:
position_ids = torch.arange(context_len, device=query.device).int()
alibi_bias = (position_ids - context_len + 1).float()
alibi_bias = alibi_slopes.view(-1, 1, 1) * alibi_bias.view(1, 1, -1)
out = ref_masked_attention(q, keys, values, scale, alibi_bias)
out = out.view(num_query_heads, head_size)
output[i].copy_(out, non_blocking=True) | def ref_single_query_cached_kv_attention(output: torch.Tensor,
        query: torch.Tensor, num_queries_per_kv: int, key_cache: torch.Tensor,
        value_cache: torch.Tensor, block_tables: torch.Tensor,
        context_lens: torch.Tensor, scale: float,
        alibi_slopes: Optional[torch.Tensor]) ->None:
num_query_heads = query.shape[1]
num_kv_heads = value_cache.shape[1]
head_size = value_cache.shape[2]
block_size = value_cache.shape[3]
num_seqs = query.shape[0]
block_tables = block_tables.cpu().tolist()
context_lens = context_lens.cpu().tolist()
for i in range(num_seqs):
q = query[i].unsqueeze(0)
block_table = block_tables[i]
context_len = int(context_lens[i])
keys = []
values = []
for j in range(context_len):
block_number = int(block_table[j // block_size])
block_offset = j % block_size
k = key_cache[block_number, :, :, block_offset, :]
k = k.reshape(num_kv_heads, head_size)
keys.append(k)
v = value_cache[block_number, :, :, block_offset]
values.append(v)
keys = torch.stack(keys, dim=0)
values = torch.stack(values, dim=0)
if num_queries_per_kv > 1:
keys = torch.repeat_interleave(keys, num_queries_per_kv, dim=1)
values = torch.repeat_interleave(values, num_queries_per_kv, dim=1)
alibi_bias = None
if alibi_slopes is not None:
position_ids = torch.arange(context_len, device=query.device).int()
alibi_bias = (position_ids - context_len + 1).float()
            alibi_bias = alibi_slopes.view(-1, 1, 1) * alibi_bias.view(1, 1, -1)
out = ref_masked_attention(q, keys, values, scale, alibi_bias)
out = out.view(num_query_heads, head_size)
output[i].copy_(out, non_blocking=True) | null |
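For reference, the ALiBi bias assembled in the loop above is, for head $h$ with slope $m_h$, context length $L$, and key positions $j = 0, \dots, L-1$:

$$
\text{bias}_{h,j} = m_h \, (j - L + 1) \le 0,
$$

so the most recent token gets zero bias and earlier tokens are penalized linearly with distance. The slicing also implies a paged-cache layout of roughly `key_cache[num_blocks, num_kv_heads, head_size // x, block_size, x]` and `value_cache[num_blocks, num_kv_heads, head_size, block_size]`; treat these shapes as an inference from the indexing here, not a documented layout.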
forward | hidden_states = self.transformer(input_ids, positions, kv_caches,
input_metadata)
return hidden_states | def forward(self, input_ids: torch.LongTensor, positions: torch.Tensor,
kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor:
hidden_states = self.transformer(input_ids, positions, kv_caches,
input_metadata)
return hidden_states | null |
get_max_shared_memory_bytes | """Returns the maximum shared memory per thread block in bytes."""
cudaDevAttrMaxSharedMemoryPerBlockOptin = 97 if not is_hip() else 74
max_shared_mem = cuda_utils.get_device_attribute(
cudaDevAttrMaxSharedMemoryPerBlockOptin, gpu)
return int(max_shared_mem) | def get_max_shared_memory_bytes(gpu: int=0) ->int:
"""Returns the maximum shared memory per thread block in bytes."""
cudaDevAttrMaxSharedMemoryPerBlockOptin = 97 if not is_hip() else 74
max_shared_mem = cuda_utils.get_device_attribute(
cudaDevAttrMaxSharedMemoryPerBlockOptin, gpu)
return int(max_shared_mem) | Returns the maximum shared memory per thread block in bytes. |
load_model | self.model_runner.load_model() | def load_model(self):
self.model_runner.load_model() | null |
__init__ | super().__init__()
self.config = config
self.linear_method = linear_method
self.gpt_neox = GPTNeoXModel(config, linear_method)
self.embed_out = ParallelLMHead(config.vocab_size, config.hidden_size)
self.sampler = Sampler(config.vocab_size) | def __init__(self, config, linear_method: Optional[LinearMethodBase]=None):
super().__init__()
self.config = config
self.linear_method = linear_method
self.gpt_neox = GPTNeoXModel(config, linear_method)
self.embed_out = ParallelLMHead(config.vocab_size, config.hidden_size)
self.sampler = Sampler(config.vocab_size) | null |
pick_ith | logits[len(token_ids)] = float('inf')
return logits | def pick_ith(token_ids, logits):
logits[len(token_ids)] = float('inf')
return logits | null |
forward | hidden_states, _ = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states, _ = self.c_proj(hidden_states)
return hidden_states | def forward(self, hidden_states: torch.Tensor) ->torch.Tensor:
hidden_states, _ = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states, _ = self.c_proj(hidden_states)
return hidden_states | null |
_multinomial | if num_samples > 1:
    probs = probs[:, None, :].expand(probs.shape[0], num_samples,
        probs.shape[1]).contiguous().view(-1, probs.shape[1])
q = torch.empty_like(probs).exponential_(1)
return probs.div_(q).argmax(dim=1).view(-1, num_samples) | def _multinomial(probs: torch.Tensor, num_samples: int):
if num_samples > 1:
        probs = probs[:, None, :].expand(probs.shape[0], num_samples,
            probs.shape[1]).contiguous().view(-1, probs.shape[1])
q = torch.empty_like(probs).exponential_(1)
return probs.div_(q).argmax(dim=1).view(-1, num_samples) | null |
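`_multinomial` draws categorical samples without calling `torch.multinomial`: dividing the probabilities by i.i.d. Exponential(1) noise and taking the argmax picks index $i$ with probability proportional to $p_i$ (the exponential-race trick). A minimal, self-contained check of that claim (illustrative only, not part of the dataset):

```python
import torch

torch.manual_seed(0)
probs = torch.tensor([[0.1, 0.2, 0.7]])
counts = torch.zeros(3)
for _ in range(20_000):
    q = torch.empty_like(probs).exponential_(1)   # i.i.d. Exp(1) noise
    counts[probs.div(q).argmax(dim=1)] += 1       # exponential-race draw
print(counts / counts.sum())  # empirically close to [0.1, 0.2, 0.7]
```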
__init__ | if config.hidden_size == 4096:
super().__init__(config, 'ROPE', linear_method)
else:
super().__init__(config, 'ALIBI', linear_method) | def __init__(self, config, linear_method: Optional[LinearMethodBase]=None):
if config.hidden_size == 4096:
super().__init__(config, 'ROPE', linear_method)
else:
super().__init__(config, 'ALIBI', linear_method) | null |
split_tensor_along_last_dim | """ Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
Returns:
A list of Tensors
"""
last_dim = tensor.dim() - 1
last_dim_size = divide(tensor.size()[last_dim], num_partitions)
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list | def split_tensor_along_last_dim(tensor: torch.Tensor, num_partitions: int,
contiguous_split_chunks: bool=False) ->Sequence[torch.Tensor]:
""" Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
Returns:
A list of Tensors
"""
last_dim = tensor.dim() - 1
last_dim_size = divide(tensor.size()[last_dim], num_partitions)
tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list | Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
Returns:
A list of Tensors |
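A minimal usage sketch of the splitter above (assuming `split_tensor_along_last_dim` is importable from the surrounding module; the tensor values are made up):

```python
import torch

x = torch.arange(12).reshape(2, 6)
chunks = split_tensor_along_last_dim(x, num_partitions=3)
# Three views of shape (2, 2); pass contiguous_split_chunks=True when the
# consumer needs contiguous memory (e.g. before a .view()).
assert all(c.shape == (2, 2) for c in chunks)
```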
_tokenize | """Returns a tokenized string."""
return self.sp_model.encode(text, out_type=str) | def _tokenize(self, text):
"""Returns a tokenized string."""
return self.sp_model.encode(text, out_type=str) | Returns a tokenized string. |
weight_loader | tp_rank = get_tensor_model_parallel_rank()
output_dim = getattr(param, 'output_dim', None)
param_data = param.data
if output_dim is not None:
shard_size = param_data.shape[output_dim]
start_idx = tp_rank * shard_size
loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
assert param_data.shape == loaded_weight.shape
param_data.copy_(loaded_weight) | def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
tp_rank = get_tensor_model_parallel_rank()
output_dim = getattr(param, 'output_dim', None)
param_data = param.data
if output_dim is not None:
shard_size = param_data.shape[output_dim]
start_idx = tp_rank * shard_size
loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
assert param_data.shape == loaded_weight.shape
param_data.copy_(loaded_weight) | null |
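To make the sharding arithmetic in `weight_loader` concrete, here is a standalone sketch with made-up shapes (the rank and sizes are hypothetical; this is not the vLLM API):

```python
import torch

full_weight = torch.randn(4096, 1024)            # checkpoint weight, output_dim=0
tp_rank, shard_size, output_dim = 1, 1024, 0     # e.g. 4-way tensor parallelism
start_idx = tp_rank * shard_size
shard = full_weight.narrow(output_dim, start_idx, shard_size)
assert shard.shape == (1024, 1024)               # this rank's slice of the weight
```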
is_empty | return self.num_tokens == 0 | def is_empty(self) ->bool:
return self.num_tokens == 0 | null |
get_new_and_finished_requests | """Get the new requests and finished requests to be
sent to the engine."""
new_requests: List[Dict] = []
finished_requests: Set[str] = set()
while not self._finished_requests.empty():
request_id = self._finished_requests.get_nowait()
finished_requests.add(request_id)
self._request_streams.pop(request_id, None)
while not self._new_requests.empty():
stream, new_request = self._new_requests.get_nowait()
if stream.request_id in finished_requests:
stream.finish()
continue
self._request_streams[stream.request_id] = stream
new_requests.append(new_request)
self.new_requests_event.clear()
return new_requests, finished_requests | def get_new_and_finished_requests(self) ->Tuple[List[Dict], Set[str]]:
"""Get the new requests and finished requests to be
sent to the engine."""
new_requests: List[Dict] = []
finished_requests: Set[str] = set()
while not self._finished_requests.empty():
request_id = self._finished_requests.get_nowait()
finished_requests.add(request_id)
self._request_streams.pop(request_id, None)
while not self._new_requests.empty():
stream, new_request = self._new_requests.get_nowait()
if stream.request_id in finished_requests:
stream.finish()
continue
self._request_streams[stream.request_id] = stream
new_requests.append(new_request)
self.new_requests_event.clear()
return new_requests, finished_requests | Get the new requests and finished requests to be
sent to the engine. |
__init__ | super().__init__()
self.hidden_size = config.hidden_size
rope_theta = getattr(config, 'rope_theta', 10000)
rope_scaling = getattr(config, 'rope_scaling', None)
max_position_embeddings = getattr(config, 'max_position_embeddings', 8192)
self.self_attn = AquilaAttention(hidden_size=self.hidden_size,
    num_heads=config.num_attention_heads,
    num_kv_heads=config.num_key_value_heads, rope_theta=rope_theta,
    max_position_embeddings=max_position_embeddings,
    rope_scaling=rope_scaling, linear_method=linear_method)
self.mlp = AquilaMLP(hidden_size=self.hidden_size,
    intermediate_size=config.intermediate_size,
    hidden_act=config.hidden_act, linear_method=linear_method)
self.input_layernorm = AquilaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = AquilaRMSNorm(config.hidden_size,
    eps=config.rms_norm_eps) | def __init__(self, config: AquilaConfig,
        linear_method: Optional[LinearMethodBase]=None):
super().__init__()
self.hidden_size = config.hidden_size
rope_theta = getattr(config, 'rope_theta', 10000)
rope_scaling = getattr(config, 'rope_scaling', None)
max_position_embeddings = getattr(config, 'max_position_embeddings', 8192)
    self.self_attn = AquilaAttention(hidden_size=self.hidden_size,
        num_heads=config.num_attention_heads,
        num_kv_heads=config.num_key_value_heads, rope_theta=rope_theta,
        max_position_embeddings=max_position_embeddings,
        rope_scaling=rope_scaling, linear_method=linear_method)
    self.mlp = AquilaMLP(hidden_size=self.hidden_size,
        intermediate_size=config.intermediate_size,
        hidden_act=config.hidden_act, linear_method=linear_method)
    self.input_layernorm = AquilaRMSNorm(config.hidden_size,
        eps=config.rms_norm_eps)
    self.post_attention_layernorm = AquilaRMSNorm(config.hidden_size,
        eps=config.rms_norm_eps) | null |
forward | hidden_states = self.embed_tokens(input_ids)
residual = None
for i in range(len(self.layers)):
layer = self.layers[i]
hidden_states, residual = layer(positions, hidden_states, kv_caches[i],
input_metadata, residual)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states | def forward(self, input_ids: torch.Tensor, positions: torch.Tensor,
kv_caches: List[KVCache], input_metadata: InputMetadata) ->torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
residual = None
for i in range(len(self.layers)):
layer = self.layers[i]
        hidden_states, residual = layer(positions, hidden_states,
            kv_caches[i], input_metadata, residual)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states | null |
__init__ | super().__init__()
self.hidden_size = config.hidden_size
rope_theta = getattr(config, 'rope_theta', 10000)
max_position_embeddings = getattr(config, 'max_position_embeddings', 8192)
self.self_attn = BaiChuanAttention(hidden_size=self.hidden_size,
    num_heads=config.num_attention_heads,
    position_embedding=position_embedding, rope_theta=rope_theta,
    max_position_embeddings=max_position_embeddings,
    linear_method=linear_method)
self.mlp = BaiChuanMLP(hidden_size=self.hidden_size,
    intermediate_size=config.intermediate_size,
    hidden_act=config.hidden_act, linear_method=linear_method)
self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = RMSNorm(config.hidden_size,
    eps=config.rms_norm_eps) | def __init__(self, config: BaiChuanConfig, position_embedding: str,
linear_method: Optional[LinearMethodBase]=None):
super().__init__()
self.hidden_size = config.hidden_size
rope_theta = getattr(config, 'rope_theta', 10000)
max_position_embeddings = getattr(config, 'max_position_embeddings', 8192)
    self.self_attn = BaiChuanAttention(hidden_size=self.hidden_size,
        num_heads=config.num_attention_heads,
        position_embedding=position_embedding, rope_theta=rope_theta,
        max_position_embeddings=max_position_embeddings,
        linear_method=linear_method)
    self.mlp = BaiChuanMLP(hidden_size=self.hidden_size,
        intermediate_size=config.intermediate_size,
        hidden_act=config.hidden_act, linear_method=linear_method)
    self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
    self.post_attention_layernorm = RMSNorm(config.hidden_size,
        eps=config.rms_norm_eps) | null |
__init__ | if max_num_batched_tokens is not None:
self.max_num_batched_tokens = max_num_batched_tokens
else:
self.max_num_batched_tokens = max(max_model_len, 2048)
self.max_num_seqs = max_num_seqs
self.max_model_len = max_model_len
self.max_paddings = max_paddings
self._verify_args() | def __init__(self, max_num_batched_tokens: Optional[int], max_num_seqs: int,
max_model_len: int, max_paddings: int) ->None:
if max_num_batched_tokens is not None:
self.max_num_batched_tokens = max_num_batched_tokens
else:
self.max_num_batched_tokens = max(max_model_len, 2048)
self.max_num_seqs = max_num_seqs
self.max_model_len = max_model_len
self.max_paddings = max_paddings
self._verify_args() | null |
capture_model | assert not self.model_config.enforce_eager
logger.info(
"Capturing the model for CUDA graphs. This may lead to unexpected consequences if the model is not static. To run the model in eager mode, set 'enforce_eager=True' or use '--enforce-eager' in the CLI."
)
logger.info(
'CUDA graphs can take additional 1~3 GiB memory per GPU. If you are running out of memory, consider decreasing `gpu_memory_utilization` or enforcing eager mode.'
)
start_time = time.perf_counter()
max_batch_size = max(_BATCH_SIZES_TO_CAPTURE)
input_tokens = torch.zeros(max_batch_size, 1, dtype=torch.long).cuda()
input_positions = torch.zeros(max_batch_size, 1, dtype=torch.long).cuda()
slot_mapping = torch.empty(max_batch_size, 1, dtype=torch.long).cuda()
slot_mapping.fill_(_PAD_SLOT_ID)
context_lens = torch.ones(max_batch_size, dtype=torch.int32).cuda()
block_tables = torch.from_numpy(self.graph_block_tables).cuda()
for batch_size in reversed(_BATCH_SIZES_TO_CAPTURE):
    input_metadata = InputMetadata(is_prompt=False,
        slot_mapping=slot_mapping[:batch_size],
        max_context_len=self.max_context_len_to_capture,
        context_lens=context_lens[:batch_size],
        block_tables=block_tables[:batch_size], use_cuda_graph=True)
    graph_runner = CUDAGraphRunner(self.model)
    graph_runner.capture(input_tokens[:batch_size],
        input_positions[:batch_size], kv_caches, input_metadata,
        memory_pool=self.graph_memory_pool)
self.graph_memory_pool = graph_runner.graph.pool()
self.graph_runners[batch_size] = graph_runner
end_time = time.perf_counter()
elapsed_time = end_time - start_time
logger.info(f'Graph capturing finished in {elapsed_time:.0f} secs.') | @torch.inference_mode()
def capture_model(self, kv_caches: List[KVCache]) ->None:
assert not self.model_config.enforce_eager
logger.info(
"Capturing the model for CUDA graphs. This may lead to unexpected consequences if the model is not static. To run the model in eager mode, set 'enforce_eager=True' or use '--enforce-eager' in the CLI."
)
logger.info(
'CUDA graphs can take additional 1~3 GiB memory per GPU. If you are running out of memory, consider decreasing `gpu_memory_utilization` or enforcing eager mode.'
)
start_time = time.perf_counter()
max_batch_size = max(_BATCH_SIZES_TO_CAPTURE)
input_tokens = torch.zeros(max_batch_size, 1, dtype=torch.long).cuda()
input_positions = torch.zeros(max_batch_size, 1, dtype=torch.long).cuda()
slot_mapping = torch.empty(max_batch_size, 1, dtype=torch.long).cuda()
slot_mapping.fill_(_PAD_SLOT_ID)
context_lens = torch.ones(max_batch_size, dtype=torch.int32).cuda()
block_tables = torch.from_numpy(self.graph_block_tables).cuda()
for batch_size in reversed(_BATCH_SIZES_TO_CAPTURE):
        input_metadata = InputMetadata(is_prompt=False,
            slot_mapping=slot_mapping[:batch_size],
            max_context_len=self.max_context_len_to_capture,
            context_lens=context_lens[:batch_size],
            block_tables=block_tables[:batch_size], use_cuda_graph=True)
        graph_runner = CUDAGraphRunner(self.model)
        graph_runner.capture(input_tokens[:batch_size],
            input_positions[:batch_size], kv_caches, input_metadata,
            memory_pool=self.graph_memory_pool)
self.graph_memory_pool = graph_runner.graph.pool()
self.graph_runners[batch_size] = graph_runner
end_time = time.perf_counter()
elapsed_time = end_time - start_time
logger.info(f'Graph capturing finished in {elapsed_time:.0f} secs.') | null |
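`capture_model` records one CUDA graph per decode batch size via `CUDAGraphRunner`. The sketch below shows the underlying PyTorch CUDA-graph mechanism it presumably builds on (an illustration with a toy module, not the vLLM implementation), including the warm-up pass that graph capture generally requires:

```python
import torch

model = torch.nn.Linear(8, 8).cuda()
static_in = torch.zeros(4, 8, device='cuda')      # fixed input buffer

# Warm up on a side stream before capture, as recommended by the PyTorch docs.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
    model(static_in)
torch.cuda.current_stream().wait_stream(s)

graph = torch.cuda.CUDAGraph()
with torch.cuda.graph(graph):
    static_out = model(static_in)                 # output buffer allocated during capture

# Replays rerun the captured kernels; only the static buffers change.
static_in.copy_(torch.randn(4, 8, device='cuda'))
graph.replay()
print(static_out.shape)
```

Passing `memory_pool` between successive captures, as `capture_model` does, lets the graphs share one allocation pool (analogous to the `pool=` argument of `torch.cuda.graph`).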
sample | next_tokens = self.sampler(self.lm_head_weight, hidden_states,
sampling_metadata)
return next_tokens | def sample(self, hidden_states: torch.Tensor, sampling_metadata:
SamplingMetadata) ->Optional[SamplerOutput]:
next_tokens = self.sampler(self.lm_head_weight, hidden_states,
sampling_metadata)
return next_tokens | null |
__init__ | super().__init__()
self.config = config
self.linear_method = linear_method
self.transformer = GPT2Model(config, linear_method)
self.lm_head_weight = self.transformer.wte.weight
self.sampler = Sampler(config.vocab_size) | def __init__(self, config: GPT2Config,
        linear_method: Optional[LinearMethodBase]=None):
super().__init__()
self.config = config
self.linear_method = linear_method
self.transformer = GPT2Model(config, linear_method)
self.lm_head_weight = self.transformer.wte.weight
self.sampler = Sampler(config.vocab_size) | null |
forward | attn_input = self.input_layernorm(hidden_states)
attn_output = self.attention(position_ids=position_ids,
    hidden_states=attn_input, kv_cache=kv_cache, input_metadata=input_metadata)
if self.use_parallel_residual:
mlp_input = self.post_attention_layernorm(hidden_states)
mlp_output = self.mlp(mlp_input)
hidden_states = mlp_output + attn_output + hidden_states
else:
attn_output = attn_output + hidden_states
mlp_input = self.post_attention_layernorm(attn_output)
mlp_output = self.mlp(mlp_input)
hidden_states = mlp_output + attn_output
return hidden_states | def forward(self, position_ids: torch.Tensor, hidden_states: torch.Tensor,
kv_cache: KVCache, input_metadata: InputMetadata) ->torch.Tensor:
attn_input = self.input_layernorm(hidden_states)
    attn_output = self.attention(position_ids=position_ids,
        hidden_states=attn_input, kv_cache=kv_cache,
        input_metadata=input_metadata)
if self.use_parallel_residual:
mlp_input = self.post_attention_layernorm(hidden_states)
mlp_output = self.mlp(mlp_input)
hidden_states = mlp_output + attn_output + hidden_states
else:
attn_output = attn_output + hidden_states
mlp_input = self.post_attention_layernorm(attn_output)
mlp_output = self.mlp(mlp_input)
hidden_states = mlp_output + attn_output
return hidden_states | null |