Jae-Won Chung committed on
Commit
48d37df
·
unverified ·
1 Parent(s): 61d89e9

Delete outdated run.sh

Browse files
benchmark/llm_text_generation/chat/scripts/run.sh DELETED
@@ -1,71 +0,0 @@
1
- #!/usr/bin/env bash
2
-
3
- run() {
4
- # TP 4 GPUs
5
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 5.00 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
6
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 4.50 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
7
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 4.00 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
8
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 3.50 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
9
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 3.00 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
10
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 2.50 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
11
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 2.00 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
12
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 1.50 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
13
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 1.00 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
14
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.75 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
15
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.50 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
16
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.25 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
17
-
18
- # TP 2 GPUs
19
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 2.50 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1
20
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 2.25 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1
21
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 2.00 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1
22
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 1.75 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1
23
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 1.50 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1
24
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 1.25 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1
25
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 1.00 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1
26
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.75 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1
27
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.50 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1
28
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.25 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1
29
-
30
- # 1 GPU
31
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 1.25 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0
32
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 1.125 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0
33
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 1.00 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0
34
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.875 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0
35
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.75 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0
36
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.625 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0
37
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.50 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0
38
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.375 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0
39
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.25 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0
40
- python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 0.125 --power-limit $PL --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0
41
- }
42
-
43
- # Warmup
44
- timeout --signal SIGINT 120 python scripts/benchmark_one.py --backend vllm --server-image mlenergy/vllm:v0.3.0-openai --model meta-llama/Llama-2-13b-chat-hf --sharegpt-path ../../../sharegpt/ShareGPT_V3_filtered_500.json --request-rate 5.00 --power-limit 300 --result-root results/2024-02-19-scaling --huggingface-token $HF_TOKEN --gpu-ids 0 1 2 3
45
-
46
- # PL=300
47
- # run
48
- #
49
- # PL=275
50
- # run
51
- #
52
- # PL=250
53
- # run
54
- #
55
- # PL=225
56
- # run
57
- #
58
- # PL=200
59
- # run
60
-
61
- PL=175
62
- run
63
-
64
- PL=150
65
- run
66
-
67
- PL=125
68
- run
69
-
70
- PL=100
71
- run