Spaces:
Running
on
T4
fix(MAX_NUM_BATCHED_TOKENS): fix typo
Browse files — Dockerfile: +1 -1
Dockerfile
CHANGED
@@ -37,7 +37,7 @@ ENV HF_HOME="/tmp/.cache/huggingface"
|
|
37 |
|
38 |
EXPOSE 7860
|
39 |
|
40 |
-
ENTRYPOINT ["/bin/bash", "-c", "vllm serve ${MODEL_NAME} --task ${TASK} --revision ${MODEL_REVISION} --code-revision ${MODEL_REVISION} --tokenizer-revision ${MODEL_NAME} --seed 42 --host 0.0.0.0 --port 7860 --max-num-batched-tokens ${MAX_NUM_BATCHED_TOKENS}"]
|
41 |
|
42 |
# # FROM nvidia/cuda:12.1.0-cudnn8-runtime-ubuntu22.04
|
43 |
# FROM nvidia/cuda:12.9.1-cudnn-runtime-ubuntu24.04
|
|
|
37 |
|
38 |
EXPOSE 7860
|
39 |
|
40 |
+
ENTRYPOINT ["/bin/bash", "-c", "vllm serve ${MODEL_NAME} --task ${TASK} --revision ${MODEL_REVISION} --code-revision ${MODEL_REVISION} --tokenizer-revision ${MODEL_NAME} --seed 42 --host 0.0.0.0 --port 7860 --max-num-batched-tokens ${MAX_NUM_BATCHED_TOKENS} --max-model-len ${MAX_MODEL_LEN} --dtype float16 --enforce-eager --gpu-memory-utilization 0.9 --enable-prefix-caching --disable-log-requests --trust-remote-code"]
|
41 |
|
42 |
# # FROM nvidia/cuda:12.1.0-cudnn8-runtime-ubuntu22.04
|
43 |
# FROM nvidia/cuda:12.9.1-cudnn-runtime-ubuntu24.04
|