Update README.md
Browse files
README.md
CHANGED
@@ -306,7 +306,7 @@ python benchmarks/benchmark_latency.py --input-len 256 --output-len 256 --model
|
|
306 |
|
307 |
### int4wo-hqq
|
308 |
```Shell
|
309 |
- python benchmarks/benchmark_latency.py --input-len 256 --output-len 256 --model pytorch/Phi-4-mini-instruct-int4wo-hqq --batch-size 1
|
310 |
```
|
311 |
|
312 |
## benchmark_serving
|
@@ -337,7 +337,7 @@ vllm serve pytorch/Phi-4-mini-instruct-int4wo-hqq --tokenizer microsoft/Phi-4-mi
|
|
337 |
|
338 |
Client:
|
339 |
```Shell
|
340 |
- python benchmarks/benchmark_serving.py --backend vllm --dataset-name sharegpt --tokenizer microsoft/Phi-4-mini-instruct --dataset-path ./ShareGPT_V3_unfiltered_cleaned_split.json --model pytorch/Phi-4-mini-instruct-int4wo-hqq --num-prompts 1
|
341 |
```
|
342 |
|
343 |
|
|
|
306 |
|
307 |
### int4wo-hqq
|
308 |
```Shell
|
309 |
+ VLLM_DISABLE_COMPILE_CACHE=1 python benchmarks/benchmark_latency.py --input-len 256 --output-len 256 --model pytorch/Phi-4-mini-instruct-int4wo-hqq --batch-size 1
|
310 |
```
|
311 |
|
312 |
## benchmark_serving
|
|
|
337 |
|
338 |
Client:
|
339 |
```Shell
|
340 |
+ VLLM_DISABLE_COMPILE_CACHE=1 python benchmarks/benchmark_serving.py --backend vllm --dataset-name sharegpt --tokenizer microsoft/Phi-4-mini-instruct --dataset-path ./ShareGPT_V3_unfiltered_cleaned_split.json --model pytorch/Phi-4-mini-instruct-int4wo-hqq --num-prompts 1
|
341 |
```
|
342 |
|
343 |
|