"""A GPU worker class."""

import os
from typing import Dict, List, Optional, Tuple

import torch
import torch.distributed

from vllm.config import CacheConfig, ModelConfig, ParallelConfig, SchedulerConfig
from vllm.model_executor import set_random_seed
from vllm.model_executor.parallel_utils.communication_op import broadcast_object_list
from vllm.model_executor.parallel_utils.parallel_state import initialize_model_parallel
from vllm.sequence import SamplerOutput, SequenceGroupMetadata
from vllm.worker.cache_engine import CacheEngine

from .model_runner import ModelRunner


class Worker:
    """A worker class that executes (a partition of) the model on a GPU.

    Each worker is associated with a single GPU. The worker is responsible for
    maintaining the KV cache and executing the model on the GPU. In case of
    distributed inference, each worker is assigned a partition of the model.
    """

    def __init__(
        self,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
        scheduler_config: SchedulerConfig,
        local_rank: int,
        rank: int,
        distributed_init_method: str,
        post_model_path: str,
        is_driver_worker: bool = False,
    ) -> None:
        self.model_config = model_config
        self.parallel_config = parallel_config
        self.scheduler_config = scheduler_config
        self.local_rank = local_rank
        self.rank = rank
        self.distributed_init_method = distributed_init_method
        self.is_driver_worker = is_driver_worker
        self.post_model_path = post_model_path

        if self.is_driver_worker:
            assert self.rank == 0, "The driver worker must have rank 0."

        self.model_runner = ModelRunner(
            model_config,
            parallel_config,
            scheduler_config,
            is_driver_worker,
            post_model_path,
        )
        # Uninitialized cache engine. Will be initialized later by
        # self.init_cache_engine().
        self.cache_config = None
        self.cache_engine = None
        self.cache_events = None
        self.gpu_cache = None

    def init_model(self) -> None:
        # torch.distributed.all_reduce does not free its input tensor until
        # the next synchronization point, so memory usage grows with the
        # number of all_reduce calls. This env var disables that
        # record-stream behavior.
        os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"

        # This env var (set by Ray) interferes with CUDA graph capture, so
        # drop it before touching the device.
        os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
        self.device = torch.device(f"cuda:{self.local_rank}")
        torch.cuda.set_device(self.device)

        _check_if_gpu_supports_dtype(self.model_config.dtype)

        # Initialize the distributed environment.
        _init_distributed_environment(
            self.parallel_config, self.rank, self.distributed_init_method
        )

        # Set the random seed so that model initialization is reproducible.
        set_random_seed(self.model_config.seed)

    def load_model(self):
        self.model_runner.load_model()

    @torch.inference_mode()
    def profile_num_available_blocks(
        self,
        block_size: int,
        gpu_memory_utilization: float,
        cpu_swap_space: int,
    ) -> Tuple[int, int]:
        # Profile the memory usage of the model and derive the number of
        # KV-cache blocks that fit in the remaining free memory.
        torch.cuda.empty_cache()

        # Run a forward pass with dummy inputs to measure the peak activation
        # memory on top of the model weights.
        self.model_runner.profile_run()

        # Calculate the number of blocks that can be allocated with the
        # profiled peak memory.
        torch.cuda.synchronize()
        free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
        peak_memory = total_gpu_memory - free_gpu_memory

        cache_block_size = CacheEngine.get_cache_block_size(
            block_size, self.model_config, self.parallel_config
        )
        num_gpu_blocks = int(
            (total_gpu_memory * gpu_memory_utilization - peak_memory)
            // cache_block_size
        )
        num_cpu_blocks = int(cpu_swap_space // cache_block_size)
        num_gpu_blocks = max(num_gpu_blocks, 0)
        num_cpu_blocks = max(num_cpu_blocks, 0)
        torch.cuda.empty_cache()
        return num_gpu_blocks, num_cpu_blocks

    def init_cache_engine(self, cache_config: CacheConfig) -> None:
        self.cache_config = cache_config
        self.cache_engine = CacheEngine(
            self.cache_config, self.model_config, self.parallel_config
        )
        self.cache_events = self.cache_engine.events
        self.gpu_cache = self.cache_engine.gpu_cache
        self.model_runner.set_block_size(self.cache_engine.block_size)

    def warm_up_model(self) -> None:
        if not self.model_config.enforce_eager:
            self.model_runner.capture_model(self.gpu_cache)
        # Reset the seed so that the random state is not affected by model
        # initialization and profiling.
        set_random_seed(self.model_config.seed)

    def cache_swap(
        self,
        blocks_to_swap_in: Dict[int, int],
        blocks_to_swap_out: Dict[int, int],
        blocks_to_copy: Dict[int, List[int]],
    ) -> None:
        # Issue the requested cache operations on the cache engine.
        issued_cache_op = False
        if blocks_to_swap_in:
            self.cache_engine.swap_in(blocks_to_swap_in)
            issued_cache_op = True
        if blocks_to_swap_out:
            self.cache_engine.swap_out(blocks_to_swap_out)
            issued_cache_op = True
        if blocks_to_copy:
            self.cache_engine.copy(blocks_to_copy)
            issued_cache_op = True

        cache_events = self.cache_events if issued_cache_op else None

        # Wait for the cache operations to finish before running the model.
        if cache_events is not None:
            for event in cache_events:
                event.wait()

    @torch.inference_mode()
    def execute_model(
        self,
        seq_group_metadata_list: Optional[List[SequenceGroupMetadata]] = None,
        blocks_to_swap_in: Optional[Dict[int, int]] = None,
        blocks_to_swap_out: Optional[Dict[int, int]] = None,
        blocks_to_copy: Optional[Dict[int, List[int]]] = None,
    ) -> Optional[SamplerOutput]:
        if self.is_driver_worker:
            assert seq_group_metadata_list is not None
            num_seq_groups = len(seq_group_metadata_list)
            assert blocks_to_swap_in is not None
            assert blocks_to_swap_out is not None
            assert blocks_to_copy is not None
            block_swapping_info = [
                blocks_to_swap_in,
                blocks_to_swap_out,
                blocks_to_copy,
            ]
            broadcast_object_list([num_seq_groups] + block_swapping_info, src=0)
        else:
            # Non-driver workers receive the metadata broadcast by the driver:
            # [num_seq_groups, blocks_to_swap_in, blocks_to_swap_out,
            # blocks_to_copy].
            recv_data = [None] * 4
            broadcast_object_list(recv_data, src=0)
            num_seq_groups = recv_data[0]
            block_swapping_info = recv_data[1:]

        self.cache_swap(*block_swapping_info)

        # If there is no work to do, return early without running the model.
        if num_seq_groups == 0:
            return {}

        output = self.model_runner.execute_model(
            seq_group_metadata_list, self.gpu_cache
        )
        return output
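

# Illustrative sketch (not part of the worker itself): the order in which a
# driver process is expected to use the Worker API above. The config objects
# are assumed to be constructed elsewhere by the engine; the block_size,
# gpu_memory_utilization, and cpu_swap_space values below are hypothetical
# placeholders, and in a real engine the profiled block counts would be fed
# back into the CacheConfig before init_cache_engine is called.
def _example_worker_lifecycle(
    model_config: ModelConfig,
    parallel_config: ParallelConfig,
    scheduler_config: SchedulerConfig,
    cache_config: CacheConfig,
    distributed_init_method: str,
    post_model_path: str,
) -> Worker:
    worker = Worker(
        model_config,
        parallel_config,
        scheduler_config,
        local_rank=0,
        rank=0,
        distributed_init_method=distributed_init_method,
        post_model_path=post_model_path,
        is_driver_worker=True,
    )
    worker.init_model()  # select the CUDA device and set up torch.distributed
    worker.load_model()  # load the model weights onto the GPU
    num_gpu_blocks, num_cpu_blocks = worker.profile_num_available_blocks(
        block_size=16,
        gpu_memory_utilization=0.9,
        cpu_swap_space=4 * 1024**3,
    )
    worker.init_cache_engine(cache_config)  # allocate the KV cache
    worker.warm_up_model()  # capture CUDA graphs unless enforce_eager is set
    return worker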


def _init_distributed_environment(
    parallel_config: ParallelConfig,
    rank: int,
    distributed_init_method: Optional[str] = None,
) -> None:
    """Initialize the distributed environment."""
    if torch.distributed.is_initialized():
        torch_world_size = torch.distributed.get_world_size()
        if torch_world_size != parallel_config.world_size:
            raise RuntimeError(
                "torch.distributed is already initialized but the torch world "
                "size does not match parallel_config.world_size "
                f"({torch_world_size} vs. {parallel_config.world_size})."
            )
    elif not distributed_init_method:
        raise ValueError(
            "distributed_init_method must be set if torch.distributed "
            "is not already initialized"
        )
    else:
        torch.distributed.init_process_group(
            backend="nccl",
            world_size=parallel_config.world_size,
            rank=rank,
            init_method=distributed_init_method,
        )

    # Warm up NCCL with a small all_reduce before setting up the model
    # parallel groups.
    torch.distributed.all_reduce(torch.zeros(1).cuda())
    initialize_model_parallel(
        parallel_config.tensor_parallel_size, parallel_config.pipeline_parallel_size
    )
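

# Illustrative sketch: a minimal, single-GPU call into
# _init_distributed_environment. The tcp:// address is a hypothetical example
# of a torch.distributed rendezvous; any init_method accepted by
# torch.distributed.init_process_group (e.g. an env:// or file:// URL) works,
# and parallel_config is assumed to describe a world size of 1.
def _example_single_gpu_init(parallel_config: ParallelConfig) -> None:
    # NCCL initialization and the warmup all_reduce above require a CUDA
    # device to be selected first.
    torch.cuda.set_device(0)
    _init_distributed_environment(
        parallel_config,
        rank=0,
        distributed_init_method="tcp://127.0.0.1:29500",
    )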


def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
    # bfloat16 requires compute capability 8.0 (Ampere) or newer.
    if torch_dtype == torch.bfloat16:
        compute_capability = torch.cuda.get_device_capability()
        if compute_capability[0] < 8:
            gpu_name = torch.cuda.get_device_name()
            raise ValueError(
                "Bfloat16 is only supported on GPUs with compute capability "
                f"of at least 8.0. Your {gpu_name} GPU has compute capability "
                f"{compute_capability[0]}.{compute_capability[1]}."
            )
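

# Illustrative sketch of the block-budget arithmetic used in
# Worker.profile_num_available_blocks, with hypothetical numbers. The
# cache_block_size below is a made-up placeholder; the real value comes from
# CacheEngine.get_cache_block_size and depends on the model and parallel
# configuration.
def _example_block_budget() -> Tuple[int, int]:
    total_gpu_memory = 24 * 1024**3  # a 24 GiB GPU (hypothetical)
    peak_memory = 14 * 1024**3  # profiled peak: weights + activations
    gpu_memory_utilization = 0.9  # fraction of the GPU the cache may use
    cpu_swap_space = 4 * 1024**3  # CPU swap budget in bytes
    cache_block_size = 2 * 1024**2  # bytes per KV-cache block (placeholder)

    # Same formula as profile_num_available_blocks: whatever the utilization
    # budget leaves after the profiled peak is carved into KV-cache blocks.
    # With these numbers: (24 * 0.9 - 14) GiB / 2 MiB -> 3891 GPU blocks,
    # and 4 GiB / 2 MiB -> 2048 CPU blocks.
    num_gpu_blocks = int(
        (total_gpu_memory * gpu_memory_utilization - peak_memory)
        // cache_block_size
    )
    num_cpu_blocks = int(cpu_swap_space // cache_block_size)
    return max(num_gpu_blocks, 0), max(num_cpu_blocks, 0)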