vllm.engine.async_llm_engine.AsyncEngineDeadError
Does anyone know how to solve this problem? Requests to /v1/chat/completions return 500 Internal Server Error, and the engine reports that its background loop has already errored. Full log below:
INFO 06-06 22:07:06 metrics.py:229] Avg prompt throughput: 0.0 tokens/s, Avg generation throughput: 0.0 tokens/s, Running: 1 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.0%, CPU KV cache usage: 0.0%
INFO 06-06 22:07:13 async_llm_engine.py:524] Received request cmpl-408419f690a540d8a83af1375ee06b91: prompt: '[gMASK]<|user|>\n你好<|assistant|>', sampling_params: SamplingParams(n=1, best_of=1, presence_penalty=0.0, frequency_penalty=0.0, repetition_penalty=1.0, temperature=0.7, top_p=1.0, top_k=-1, min_p=0.0, seed=None, use_beam_search=False, length_penalty=1.0, early_stopping=False, stop=[], stop_token_ids=[151329, 151336, 151338], include_stop_str_in_output=False, ignore_eos=False, max_tokens=1024, min_tokens=0, logprobs=None, prompt_logprobs=None, skip_special_tokens=True, spaces_between_special_tokens=True, truncate_prompt_tokens=None), prompt_token_ids: [151331, 151333, 151331, 151333, 151336, 198, 109377, 151337], lora_request: None.
INFO 06-06 22:07:13 async_llm_engine.py:154] Aborted request cmpl-408419f690a540d8a83af1375ee06b91.
INFO: - "POST /v1/chat/completions HTTP/1.1" 500 Internal Server Error
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/workspace/miniconda3/lib/python3.10/site-packages/uvicorn/protocols/http/httptools_impl.py", line 411, in run_asgi
result = await app( # type: ignore[func-returns-value]
File "/workspace/miniconda3/lib/python3.10/site-packages/uvicorn/middleware/proxy_headers.py", line 69, in call
return await self.app(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/fastapi/applications.py", line 1054, in call
await super().call(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/applications.py", line 123, in call
await self.middleware_stack(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/middleware/errors.py", line 186, in call
raise exc
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/middleware/errors.py", line 164, in call
await self.app(scope, receive, _send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/middleware/cors.py", line 85, in call
await self.app(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/middleware/exceptions.py", line 65, in call
await wrap_app_handling_exceptions(self.app, conn)(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/_exception_handler.py", line 64, in wrapped_app
raise exc
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/_exception_handler.py", line 53, in wrapped_app
await app(scope, receive, sender)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/routing.py", line 756, in call
await self.middleware_stack(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/routing.py", line 776, in app
await route.handle(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/routing.py", line 297, in handle
await self.app(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/routing.py", line 77, in app
await wrap_app_handling_exceptions(app, request)(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/_exception_handler.py", line 64, in wrapped_app
raise exc
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/_exception_handler.py", line 53, in wrapped_app
await app(scope, receive, sender)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/routing.py", line 72, in app
response = await func(request)
File "/workspace/miniconda3/lib/python3.10/site-packages/fastapi/routing.py", line 278, in app
raw_response = await run_endpoint_function(
File "/workspace/miniconda3/lib/python3.10/site-packages/fastapi/routing.py", line 191, in run_endpoint_function
return await dependant.call(**values)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/entrypoints/openai/api_server.py", line 90, in create_chat_completion
generator = await openai_serving_chat.create_chat_completion(
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_chat.py", line 95, in create_chat_completion
return await self.chat_completion_full_generator(
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_chat.py", line 258, in chat_completion_full_generator
async for res in result_generator:
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 661, in generate
raise e
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 655, in generate
async for request_output in stream:
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 77, in anext
raise result
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 38, in _raise_exception_on_finish
task.result()
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 496, in run_engine_loop
has_requests_in_progress = await asyncio.wait_for(
File "/workspace/miniconda3/lib/python3.10/asyncio/tasks.py", line 445, in wait_for
return fut.result()
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 470, in engine_step
request_outputs = await self.engine.step_async()
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 213, in step_async
output = await self.model_executor.execute_model_async(
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/executor/ray_gpu_executor.py", line 418, in execute_model_async
all_outputs = await self._run_workers_async(
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/executor/ray_gpu_executor.py", line 408, in _run_workers_async
all_outputs = await asyncio.gather(*coros)
File "/workspace/miniconda3/lib/python3.10/asyncio/tasks.py", line 650, in _wrap_awaitable
return (yield from awaitable.__await__())
ray.exceptions.RayTaskError(CalledProcessError): ray::RayWorkerWrapper.execute_method() (pid=15106, ip=172.22.18.9, actor_id=b8b1d8450328240a61f1355701000000, repr=<vllm.engine.ray_utils.RayWorkerWrapper object at 0x7efd2538f2e0>)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/worker/worker_base.py", line 158, in execute_method
raise e
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/worker/worker_base.py", line 149, in execute_method
return executor(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/worker/worker.py", line 249, in execute_model
output = self.model_runner.execute_model(seq_group_metadata_list,
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/worker/model_runner.py", line 848, in execute_model
hidden_states = model_executable(**execute_model_kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/model_executor/models/chatglm.py", line 354, in forward
hidden_states = self.transformer(input_ids, positions, kv_caches,
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/model_executor/models/chatglm.py", line 309, in forward
hidden_states = self.encoder(
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/model_executor/models/chatglm.py", line 266, in forward
hidden_states = layer(
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/model_executor/models/chatglm.py", line 204, in forward
attention_output = self.self_attention(
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/model_executor/models/chatglm.py", line 104, in forward
context_layer = self.attn(
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/attention/layer.py", line 48, in forward
return self.impl.forward(query, key, value, kv_cache, attn_metadata,
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/attention/backends/xformers.py", line 237, in forward
out = PagedAttention.forward_prefix(
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/attention/ops/paged_attn.py", line 177, in forward_prefix
context_attention_fwd(
File "/workspace/miniconda3/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/attention/ops/prefix_prefill.py", line 708, in context_attention_fwd
_fwd_kernel[grid](
File "/workspace/miniconda3/lib/python3.10/site-packages/triton/runtime/jit.py", line 550, in run
bin.c_wrapper(
File "/workspace/miniconda3/lib/python3.10/site-packages/triton/compiler/compiler.py", line 692, in __getattribute__
self._init_handles()
File "/workspace/miniconda3/lib/python3.10/site-packages/triton/compiler/compiler.py", line 670, in _init_handles
bin_path = {driver.HIP: "hsaco_path", driver.CUDA: "cubin"}[driver.backend]
File "/workspace/miniconda3/lib/python3.10/site-packages/triton/runtime/driver.py", line 157, in getattr
self._initialize_obj()
File "/workspace/miniconda3/lib/python3.10/site-packages/triton/runtime/driver.py", line 154, in _initialize_obj
self._obj = self._init_fn()
File "/workspace/miniconda3/lib/python3.10/site-packages/triton/runtime/driver.py", line 187, in initialize_driver
return CudaDriver()
File "/workspace/miniconda3/lib/python3.10/site-packages/triton/runtime/driver.py", line 77, in init
self.utils = CudaUtils()
File "/workspace/miniconda3/lib/python3.10/site-packages/triton/runtime/driver.py", line 47, in init
so = _build("cuda_utils", src_path, tmpdir)
File "/workspace/miniconda3/lib/python3.10/site-packages/triton/common/build.py", line 106, in _build
ret = subprocess.check_call(cc_cmd)
File "/workspace/miniconda3/lib/python3.10/subprocess.py", line 369, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['/usr/bin/gcc', '/tmp/tmp5eeudfgr/main.c', '-O3', '-I/workspace/miniconda3/lib/python3.10/site-packages/triton/common/../third_party/cuda/include', '-I/workspace/miniconda3/include/python3.10', '-I/tmp/tmp5eeudfgr', '-shared', '-fPIC', '-lcuda', '-o', '/tmp/tmp5eeudfgr/cuda_utils.cpython-310-x86_64-linux-gnu.so', '-L/usr/lib64']' returned non-zero exit status 1.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/workspace/miniconda3/lib/python3.10/site-packages/uvicorn/protocols/http/httptools_impl.py", line 411, in run_asgi
result = await app( # type: ignore[func-returns-value]
File "/workspace/miniconda3/lib/python3.10/site-packages/uvicorn/middleware/proxy_headers.py", line 69, in call
return await self.app(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/fastapi/applications.py", line 1054, in call
await super().call(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/applications.py", line 123, in call
await self.middleware_stack(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/middleware/errors.py", line 186, in call
raise exc
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/middleware/errors.py", line 164, in call
await self.app(scope, receive, _send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/middleware/cors.py", line 85, in call
await self.app(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/middleware/exceptions.py", line 65, in call
await wrap_app_handling_exceptions(self.app, conn)(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/_exception_handler.py", line 64, in wrapped_app
raise exc
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/_exception_handler.py", line 53, in wrapped_app
await app(scope, receive, sender)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/routing.py", line 756, in call
await self.middleware_stack(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/routing.py", line 776, in app
await route.handle(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/routing.py", line 297, in handle
await self.app(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/routing.py", line 77, in app
await wrap_app_handling_exceptions(app, request)(scope, receive, send)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/_exception_handler.py", line 64, in wrapped_app
raise exc
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/_exception_handler.py", line 53, in wrapped_app
await app(scope, receive, sender)
File "/workspace/miniconda3/lib/python3.10/site-packages/starlette/routing.py", line 72, in app
response = await func(request)
File "/workspace/miniconda3/lib/python3.10/site-packages/fastapi/routing.py", line 278, in app
raw_response = await run_endpoint_function(
File "/workspace/miniconda3/lib/python3.10/site-packages/fastapi/routing.py", line 191, in run_endpoint_function
return await dependant.call(**values)
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/entrypoints/openai/api_server.py", line 90, in create_chat_completion
generator = await openai_serving_chat.create_chat_completion(
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_chat.py", line 95, in create_chat_completion
return await self.chat_completion_full_generator(
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_chat.py", line 258, in chat_completion_full_generator
async for res in result_generator:
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 661, in generate
raise e
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 645, in generate
stream = await self.add_request(
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 532, in add_request
self.start_background_loop()
File "/workspace/miniconda3/lib/python3.10/site-packages/vllm/engine/async_llm_engine.py", line 406, in start_background_loop
raise AsyncEngineDeadError(
vllm.engine.async_llm_engine.AsyncEngineDeadError: Background loop has errored already.
I'm hitting the same issue.
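From the log, the AsyncEngineDeadError at the end looks like a symptom rather than the root cause: the first traceback shows Triton failing to build its cuda_utils extension because the gcc command at the bottom exits with status 1, which crashes the Ray worker and kills the engine's background loop. A minimal diagnostic sketch (my own addition, assuming the /usr/bin/gcc and -L/usr/lib64 paths from the log) to surface the compiler error that subprocess.check_call hides:

# Diagnostic sketch, not from the original report: build a trivial shared object
# with the same compiler, flags, and -lcuda link as the failing command in the
# log, using subprocess.run so gcc's stderr is actually printed.
import os
import subprocess
import sysconfig
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    src = os.path.join(tmpdir, "main.c")
    with open(src, "w") as f:
        # trivial source file; the compile-and-link step is what usually fails
        f.write("int main(void) { return 0; }\n")

    cmd = [
        "/usr/bin/gcc", src, "-O3",
        "-I" + sysconfig.get_paths()["include"],  # Python headers, as in the logged command
        "-shared", "-fPIC", "-lcuda",             # -lcuda: needs libcuda.so visible to the linker
        "-o", os.path.join(tmpdir, "check.so"),
        "-L/usr/lib64",                           # same library search path as in the log
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    print("exit code:", result.returncode)
    print(result.stderr)

If stderr reports something like "cannot find -lcuda", the linker cannot locate libcuda.so under the searched paths (common in containers that only ship the CUDA runtime); if it complains about missing headers, the include paths are the problem. Fixing whatever that compile step reports should stop the prefix-prefill kernel build from crashing the worker and taking the engine loop down with it.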