fxmarty committed
Commit 78936d4 · 1 Parent(s): 90c1496

Adding regression benchmark for the transformers SHA 1d75768695f667fc1efcb8823c062d41ad30f090

Files changed (30)
  1. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/0/hydra_config.yaml +66 -0
  2. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/0/inference_results.csv +2 -0
  3. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/0/main.log +23 -0
  4. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/1/hydra_config.yaml +66 -0
  5. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/1/inference_results.csv +2 -0
  6. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/1/main.log +23 -0
  7. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/2/hydra_config.yaml +66 -0
  8. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/2/inference_results.csv +2 -0
  9. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/2/main.log +23 -0
  10. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/3/hydra_config.yaml +66 -0
  11. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/3/inference_results.csv +2 -0
  12. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/3/main.log +23 -0
  13. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/4/hydra_config.yaml +66 -0
  14. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/4/inference_results.csv +2 -0
  15. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/4/main.log +23 -0
  16. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/5/hydra_config.yaml +66 -0
  17. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/5/inference_results.csv +2 -0
  18. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/5/main.log +23 -0
  19. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/6/hydra_config.yaml +66 -0
  20. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/6/inference_results.csv +2 -0
  21. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/6/main.log +23 -0
  22. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/7/hydra_config.yaml +66 -0
  23. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/7/inference_results.csv +2 -0
  24. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/7/main.log +23 -0
  25. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_bert_inference/0/hydra_config.yaml +66 -0
  26. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_bert_inference/0/inference_results.csv +2 -0
  27. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_bert_inference/0/main.log +20 -0
  28. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_gpt2_inference/0/hydra_config.yaml +66 -0
  29. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_gpt2_inference/0/inference_results.csv +2 -0
  30. raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_gpt2_inference/0/main.log +22 -0
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
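Note: each numbered subdirectory (0-7) under llama_1gpu_inference holds one run of a Hydra sweep; comparing the eight configs, only `backend.torch_dtype` (float16/float32) and `benchmark.input_shapes.batch_size` (1, 2, 4, 16) vary. Below is a minimal sketch for pulling those two fields out of a run's hydra_config.yaml; it assumes PyYAML is available and is illustrative, not part of this repo's tooling.

```python
# Illustrative only: read one run's Hydra config and print the two
# parameters that vary across the llama_1gpu_inference sweep.
import yaml  # assumes PyYAML is installed

run_dir = (
    "raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090"
    "/llama_1gpu_inference/0"
)
with open(f"{run_dir}/hydra_config.yaml") as f:
    config = yaml.safe_load(f)

print(config["backend"]["torch_dtype"])                   # float16
print(config["benchmark"]["input_shapes"]["batch_size"])  # 1
```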
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16195.125247999998,0.0308,32.5,5.91,33.8
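Each inference_results.csv is a one-row table with an unnamed index column followed by the measured metrics. A minimal sketch for loading it, using pandas as a convenience (an assumption, not a dependency stated by this commit):

```python
# Illustrative only: load one run's metrics into a pandas Series.
import pandas as pd  # assumed available

path = (
    "raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090"
    "/llama_1gpu_inference/0/inference_results.csv"
)
metrics = pd.read_csv(path, index_col=0).iloc[0]
print(metrics["forward.peak_memory(MB)"])        # 16195.125247999998
print(metrics["generate.throughput(tokens/s)"])  # 33.8
```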
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/0/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:14:49,020][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:14:49,022][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:14:49,309][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:14:49,309][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:14:49,309][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:14:49,450][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:14:49,465][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:14:49,466][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 13:15:56,453][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:15:56,454][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:16:04,289][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:16:05,565][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
+ [2023-08-11 13:16:05,565][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
+ [2023-08-11 13:16:05,566][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:16:05,875][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:16:26,234][inference][INFO] - + Forward pass latency: 3.08e-02 (s)
+ [2023-08-11 13:16:26,235][inference][INFO] - + Forward pass throughput: 32.50 (samples/s)
+ [2023-08-11 13:16:26,235][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:16:32,836][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:16:56,461][inference][INFO] - + Generation pass latency: 5.91e+00 (s)
+ [2023-08-11 13:16:56,463][inference][INFO] - + Generation pass throughput: 33.80 (tokens/s)
+ [2023-08-11 13:16:56,463][inference][INFO] - Saving inference results
+ [2023-08-11 13:16:56,473][backend][INFO] - Cleaning backend
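The logged throughputs are consistent with the logged latencies and the run's config: forward throughput ≈ batch_size / forward latency, and generation throughput ≈ batch_size × new_tokens / generation latency. A quick check for run 0; the formulas are inferred from the numbers above, not quoted from optimum-benchmark's source:

```python
# Reproduce run 0's reported throughputs from its measured latencies.
batch_size = 1              # from hydra_config.yaml
new_tokens = 200            # from hydra_config.yaml
forward_latency = 3.08e-02  # seconds, from main.log
generate_latency = 5.91     # seconds, from main.log

print(batch_size / forward_latency)                # ~32.5 samples/s
print(batch_size * new_tokens / generate_latency)  # ~33.8 tokens/s
```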
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30317.346815999997,0.0643,15.6,5.65,35.4
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/1/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:16:56,944][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:16:56,945][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:16:57,216][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:16:57,216][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:16:57,216][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:16:57,315][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:16:57,344][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:16:57,345][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 13:17:13,842][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:17:13,844][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:17:21,651][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:17:21,727][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
+ [2023-08-11 13:17:21,728][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
+ [2023-08-11 13:17:21,728][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:17:23,867][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:18:30,251][inference][INFO] - + Forward pass latency: 6.43e-02 (s)
+ [2023-08-11 13:18:30,252][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+ [2023-08-11 13:18:30,253][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:18:35,925][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:18:58,546][inference][INFO] - + Generation pass latency: 5.65e+00 (s)
+ [2023-08-11 13:18:58,548][inference][INFO] - + Generation pass throughput: 35.40 (tokens/s)
+ [2023-08-11 13:18:58,548][inference][INFO] - Saving inference results
+ [2023-08-11 13:18:58,555][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/2/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/2/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16381.771776,0.0312,64.1,6.09,65.7
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/2/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:18:59,037][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:18:59,038][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:18:59,238][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:18:59,238][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:18:59,239][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:18:59,343][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:18:59,370][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:18:59,371][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 13:19:10,029][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:19:10,030][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:19:17,885][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:19:17,925][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
+ [2023-08-11 13:19:17,925][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
+ [2023-08-11 13:19:17,925][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:19:18,414][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:19:49,278][inference][INFO] - + Forward pass latency: 3.12e-02 (s)
+ [2023-08-11 13:19:49,279][inference][INFO] - + Forward pass throughput: 64.10 (samples/s)
+ [2023-08-11 13:19:49,279][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:19:56,163][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:20:20,515][inference][INFO] - + Generation pass latency: 6.09e+00 (s)
+ [2023-08-11 13:20:20,517][inference][INFO] - + Generation pass throughput: 65.70 (tokens/s)
+ [2023-08-11 13:20:20,517][inference][INFO] - Saving inference results
+ [2023-08-11 13:20:20,523][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/3/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/3/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30778.720255999997,0.109,18.3,7.07,56.6
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/3/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:20:20,989][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:20:20,990][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:20:21,178][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:20:21,178][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:20:21,179][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:20:21,281][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:20:21,309][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:20:21,310][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 13:20:37,854][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:20:37,856][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:20:45,764][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:20:45,889][memory_tracker][INFO] - Peak memory usage: 30778.720255999997 MB
+ [2023-08-11 13:20:45,889][inference][INFO] - + Forward pass peak memory: 30778.720255999997 (MB)
+ [2023-08-11 13:20:45,889][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:20:49,720][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:22:00,267][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+ [2023-08-11 13:22:00,268][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+ [2023-08-11 13:22:00,268][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:22:07,347][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:22:28,566][inference][INFO] - + Generation pass latency: 7.07e+00 (s)
+ [2023-08-11 13:22:28,568][inference][INFO] - + Generation pass throughput: 56.60 (tokens/s)
+ [2023-08-11 13:22:28,568][inference][INFO] - Saving inference results
+ [2023-08-11 13:22:28,576][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/4/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/4/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,17000.431615999998,0.0314,127.0,6.14,130.0
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/4/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:22:29,090][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:22:29,091][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:22:29,335][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:22:29,335][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:22:29,335][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:22:29,438][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:22:29,464][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:22:29,465][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 13:22:40,062][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:22:40,064][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:22:47,854][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:22:47,900][memory_tracker][INFO] - Peak memory usage: 17000.431615999998 MB
+ [2023-08-11 13:22:47,901][inference][INFO] - + Forward pass peak memory: 17000.431615999998 (MB)
+ [2023-08-11 13:22:47,901][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:22:48,665][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:23:36,664][inference][INFO] - + Forward pass latency: 3.14e-02 (s)
+ [2023-08-11 13:23:36,665][inference][INFO] - + Forward pass throughput: 127.00 (samples/s)
+ [2023-08-11 13:23:36,666][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:23:43,794][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:24:08,353][inference][INFO] - + Generation pass latency: 6.14e+00 (s)
+ [2023-08-11 13:24:08,355][inference][INFO] - + Generation pass throughput: 130.00 (tokens/s)
+ [2023-08-11 13:24:08,356][inference][INFO] - Saving inference results
+ [2023-08-11 13:24:08,366][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/5/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/5/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,31481.266175999997,0.187,21.4,7.7,104.0
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/5/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:24:08,855][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:24:08,857][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:24:09,065][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:24:09,066][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:24:09,066][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:24:09,168][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:24:09,194][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:24:09,195][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 13:24:26,149][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:24:26,151][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:24:34,018][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:24:34,227][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
+ [2023-08-11 13:24:34,227][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
+ [2023-08-11 13:24:34,231][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:24:41,160][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:25:55,890][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+ [2023-08-11 13:25:55,892][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+ [2023-08-11 13:25:55,893][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:26:03,661][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:26:26,749][inference][INFO] - + Generation pass latency: 7.70e+00 (s)
+ [2023-08-11 13:26:26,751][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s)
+ [2023-08-11 13:26:26,751][inference][INFO] - Saving inference results
+ [2023-08-11 13:26:26,758][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/6/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/6/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,19498.139648,0.0974,164.0,6.32,506.0
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/6/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:26:27,318][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:26:27,319][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:26:27,589][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:26:27,589][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:26:27,589][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:26:27,693][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:26:27,719][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:26:27,720][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 13:26:38,488][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:26:38,489][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:26:46,306][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:26:46,417][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
+ [2023-08-11 13:26:46,417][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
+ [2023-08-11 13:26:46,417][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:26:49,043][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:27:43,147][inference][INFO] - + Forward pass latency: 9.74e-02 (s)
+ [2023-08-11 13:27:43,148][inference][INFO] - + Forward pass throughput: 164.00 (samples/s)
+ [2023-08-11 13:27:43,148][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:27:49,692][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:28:14,985][inference][INFO] - + Generation pass latency: 6.32e+00 (s)
+ [2023-08-11 13:28:14,987][inference][INFO] - + Generation pass throughput: 506.00 (tokens/s)
+ [2023-08-11 13:28:14,987][inference][INFO] - Saving inference results
+ [2023-08-11 13:28:14,994][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/7/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/7/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,35824.467968,0.684,23.4,13.0,246.0
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/llama_1gpu_inference/7/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 13:28:15,581][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:28:15,582][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:28:15,771][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 13:28:15,772][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:28:15,772][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:28:15,874][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:28:15,901][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:28:15,901][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 13:28:32,762][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:28:32,764][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:28:40,635][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:28:41,347][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+ [2023-08-11 13:28:41,347][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+ [2023-08-11 13:28:41,364][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:29:06,746][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:30:24,780][inference][INFO] - + Forward pass latency: 6.84e-01 (s)
+ [2023-08-11 13:30:24,781][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+ [2023-08-11 13:30:24,781][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:30:38,445][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:31:04,411][inference][INFO] - + Generation pass latency: 1.30e+01 (s)
+ [2023-08-11 13:31:04,413][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
+ [2023-08-11 13:31:04,413][inference][INFO] - Saving inference results
+ [2023-08-11 13:31:04,419][backend][INFO] - Cleaning backend
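Runs 0-7 all follow the same per-run layout, so the whole llama sweep can be collected into one table for side-by-side inspection of the dtype/batch-size grid. A sketch assuming pandas and PyYAML (again illustrative, not repo tooling):

```python
# Illustrative only: aggregate the eight llama runs into a single table.
from pathlib import Path

import pandas as pd  # assumed available
import yaml          # assumes PyYAML

root = Path(
    "raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090"
    "/llama_1gpu_inference"
)
rows = []
for run_dir in sorted(root.iterdir(), key=lambda p: int(p.name)):
    config = yaml.safe_load((run_dir / "hydra_config.yaml").read_text())
    metrics = pd.read_csv(run_dir / "inference_results.csv", index_col=0)
    rows.append({
        "run": run_dir.name,
        "torch_dtype": config["backend"]["torch_dtype"],
        "batch_size": config["benchmark"]["input_shapes"]["batch_size"],
        **metrics.iloc[0].to_dict(),
    })
print(pd.DataFrame(rows).to_string(index=False))
```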
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_bert_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_bert_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
+ 0,460.599296,0.00314,318.0
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_bert_inference/0/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-11 13:31:09,272][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:31:09,273][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:31:09,450][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-11 13:31:09,450][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:31:09,451][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:31:09,451][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:31:09,452][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:31:09,453][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-11 13:31:10,044][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:31:10,045][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:31:10,167][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-11 13:31:10,168][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:31:10,217][inference][INFO] - + Forward pass peak memory: 460.599296 (MB)
+ [2023-08-11 13:31:10,218][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-11 13:31:10,220][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:31:10,252][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:31:20,365][inference][INFO] - + Forward pass latency: 3.14e-03 (s)
+ [2023-08-11 13:31:20,367][inference][INFO] - + Forward pass throughput: 318.00 (samples/s)
+ [2023-08-11 13:31:20,367][inference][INFO] - Saving inference results
+ [2023-08-11 13:31:20,379][backend][INFO] - Cleaning backend
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_gpt2_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_gpt2_inference
+ model: hf-internal-testing/tiny-random-gpt2
+ device: cpu
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_gpt2_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,463.794176,0.00394,254.0,0.494,202.0
raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090/pytorch_gpt2_inference/0/main.log ADDED
@@ -0,0 +1,22 @@
+ [2023-08-11 13:31:24,950][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 13:31:24,951][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 13:31:25,138][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-11 13:31:25,138][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 13:31:25,138][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 13:31:25,139][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 13:31:25,140][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 13:31:25,141][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-11 13:31:25,802][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 13:31:25,803][inference][INFO] - Running inference benchmark
+ [2023-08-11 13:31:26,003][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 13:31:26,051][inference][INFO] - + Forward pass peak memory: 463.794176 (MB)
+ [2023-08-11 13:31:26,053][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 13:31:26,085][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 13:31:36,178][inference][INFO] - + Forward pass latency: 3.94e-03 (s)
+ [2023-08-11 13:31:36,180][inference][INFO] - + Forward pass throughput: 254.00 (samples/s)
+ [2023-08-11 13:31:36,181][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 13:31:36,703][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 13:31:47,074][inference][INFO] - + Generation pass latency: 4.94e-01 (s)
+ [2023-08-11 13:31:47,074][inference][INFO] - + Generation pass throughput: 202.00 (tokens/s)
+ [2023-08-11 13:31:47,075][inference][INFO] - Saving inference results
+ [2023-08-11 13:31:47,089][backend][INFO] - Cleaning backend
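Since these files exist to catch regressions between transformers commits, a natural follow-up is to diff the same experiment across two raw_results directories. A sketch with a hypothetical previous-SHA directory; the placeholder below must be replaced with a real directory name, as only the 2023-08-11 one is added by this commit:

```python
# Illustrative regression check between two benchmarked transformers SHAs.
import pandas as pd  # assumed available

OLD = "raw_results/<previous_timestamp>_<previous_sha>"  # hypothetical placeholder
NEW = "raw_results/2023-08-11_12:40:51_1d75768695f667fc1efcb8823c062d41ad30f090"
REL = "llama_1gpu_inference/0/inference_results.csv"

old = pd.read_csv(f"{OLD}/{REL}", index_col=0).iloc[0]
new = pd.read_csv(f"{NEW}/{REL}", index_col=0).iloc[0]

for metric in ("forward.latency(s)", "generate.latency(s)"):
    change = (new[metric] - old[metric]) / old[metric]
    print(f"{metric}: {change:+.1%}")  # a large positive change = slowdown
```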