Commit e42428a by fxmarty · Parent(s): 86c199f

Adding regression benchmark for the transformers SHA 2b22cde71e549e194a3b7046b7595030382382e9

Files changed (30)
  1. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/hydra_config.yaml +66 -0
  2. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/inference_results.csv +2 -0
  3. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/main.log +23 -0
  4. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/hydra_config.yaml +66 -0
  5. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/inference_results.csv +2 -0
  6. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/main.log +23 -0
  7. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/hydra_config.yaml +66 -0
  8. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/inference_results.csv +2 -0
  9. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/main.log +23 -0
  10. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/hydra_config.yaml +66 -0
  11. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/inference_results.csv +2 -0
  12. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/main.log +23 -0
  13. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/hydra_config.yaml +66 -0
  14. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/inference_results.csv +2 -0
  15. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/main.log +23 -0
  16. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/hydra_config.yaml +66 -0
  17. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/inference_results.csv +2 -0
  18. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/main.log +23 -0
  19. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/hydra_config.yaml +66 -0
  20. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/inference_results.csv +2 -0
  21. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/main.log +23 -0
  22. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/hydra_config.yaml +66 -0
  23. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/inference_results.csv +2 -0
  24. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/main.log +23 -0
  25. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/hydra_config.yaml +66 -0
  26. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/inference_results.csv +2 -0
  27. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/main.log +20 -0
  28. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/hydra_config.yaml +66 -0
  29. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/inference_results.csv +2 -0
  30. raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/main.log +22 -0
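Note: the per-experiment metrics in the diffs below can be aggregated directly from the inference_results.csv files. A minimal sketch using pandas (a local checkout of this repository and an installed pandas are assumed; neither is part of this commit):

from pathlib import Path

import pandas as pd

# Root of the benchmark run added by this commit.
RUN_DIR = Path("raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9")

frames = []
for csv_path in sorted(RUN_DIR.glob("*/*/inference_results.csv")):
    df = pd.read_csv(csv_path, index_col=0)
    df["experiment"] = csv_path.parts[-3]  # e.g. llama_1gpu_inference
    df["run"] = csv_path.parts[-2]         # sweep index: 0, 1, ...
    frames.append(df)

# Runs without a generation pass (e.g. pytorch_bert_inference) get NaN
# in the generate.* columns after concatenation.
results = pd.concat(frames, ignore_index=True)
print(results[["experiment", "run", "forward.latency(s)", "generate.throughput(tokens/s)"]])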
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
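Each hydra_config.yaml records the full Hydra configuration of one run; a minimal sketch for inspecting it with OmegaConf (the config library Hydra builds on; assumed installed, not shipped with this commit):

from omegaconf import OmegaConf

# Load the config of the first llama run added above.
cfg = OmegaConf.load(
    "raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9"
    "/llama_1gpu_inference/0/hydra_config.yaml"
)
print(cfg.backend.torch_dtype)                # float16
print(cfg.benchmark.input_shapes.batch_size)  # 1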
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16195.125247999998,0.0403,24.8,7.77,25.7
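For reference, the throughput columns are consistent with batch_size and new_tokens from the config above: forward.throughput ≈ batch_size / forward.latency (1 / 0.0403 ≈ 24.8 samples/s) and generate.throughput ≈ batch_size × new_tokens / generate.latency (1 × 200 / 7.77 ≈ 25.7 tokens/s). The same relations hold for the other runs below.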
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:07:47,592][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:07:47,594][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:07:47,874][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:07:47,875][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:07:47,875][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:07:48,018][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:07:48,034][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:07:48,036][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 19:08:57,160][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:08:57,162][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:09:05,918][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:09:07,187][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
+ [2023-08-13 19:09:07,187][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
+ [2023-08-13 19:09:07,188][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:09:07,592][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:09:27,779][inference][INFO] - + Forward pass latency: 4.03e-02 (s)
+ [2023-08-13 19:09:27,779][inference][INFO] - + Forward pass throughput: 24.80 (samples/s)
+ [2023-08-13 19:09:27,780][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:09:36,265][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:09:59,586][inference][INFO] - + Generation pass latency: 7.77e+00 (s)
+ [2023-08-13 19:09:59,589][inference][INFO] - + Generation pass throughput: 25.70 (tokens/s)
+ [2023-08-13 19:09:59,589][inference][INFO] - Saving inference results
+ [2023-08-13 19:09:59,598][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30317.346815999997,0.0644,15.5,7.37,27.1
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:10:00,072][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:10:00,074][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:10:00,266][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:10:00,266][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:10:00,266][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:10:00,367][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:10:00,406][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:10:00,407][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 19:10:17,670][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:10:17,672][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:10:26,262][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:10:26,338][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
+ [2023-08-13 19:10:26,338][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
+ [2023-08-13 19:10:26,339][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:10:28,471][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:11:34,720][inference][INFO] - + Forward pass latency: 6.44e-02 (s)
+ [2023-08-13 19:11:34,721][inference][INFO] - + Forward pass throughput: 15.50 (samples/s)
+ [2023-08-13 19:11:34,722][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:11:42,092][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:12:04,215][inference][INFO] - + Generation pass latency: 7.37e+00 (s)
+ [2023-08-13 19:12:04,217][inference][INFO] - + Generation pass throughput: 27.10 (tokens/s)
+ [2023-08-13 19:12:04,217][inference][INFO] - Saving inference results
+ [2023-08-13 19:12:04,225][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16381.771776,0.0409,48.9,7.98,50.1
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:12:04,711][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:12:04,712][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:12:04,904][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:12:04,904][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:12:04,904][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:12:05,007][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:12:05,045][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:12:05,046][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 19:12:16,454][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:12:16,455][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:12:25,068][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:12:25,116][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
+ [2023-08-13 19:12:25,116][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
+ [2023-08-13 19:12:25,117][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:12:25,600][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:12:49,376][inference][INFO] - + Forward pass latency: 4.09e-02 (s)
+ [2023-08-13 19:12:49,377][inference][INFO] - + Forward pass throughput: 48.90 (samples/s)
+ [2023-08-13 19:12:49,377][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:12:58,181][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:13:22,117][inference][INFO] - + Generation pass latency: 7.98e+00 (s)
+ [2023-08-13 19:13:22,119][inference][INFO] - + Generation pass throughput: 50.10 (tokens/s)
+ [2023-08-13 19:13:22,119][inference][INFO] - Saving inference results
+ [2023-08-13 19:13:22,126][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30780.817408,0.109,18.3,7.08,56.5
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:13:22,606][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:13:22,607][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:13:22,802][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:13:22,802][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:13:22,802][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:13:22,907][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:13:22,945][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:13:22,946][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 19:13:40,313][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:13:40,315][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:13:48,817][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:13:48,940][memory_tracker][INFO] - Peak memory usage: 30780.817408 MB
+ [2023-08-13 19:13:48,940][inference][INFO] - + Forward pass peak memory: 30780.817408 (MB)
+ [2023-08-13 19:13:48,941][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:13:52,763][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:15:03,423][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+ [2023-08-13 19:15:03,424][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+ [2023-08-13 19:15:03,424][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:15:10,517][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:15:31,769][inference][INFO] - + Generation pass latency: 7.08e+00 (s)
+ [2023-08-13 19:15:31,770][inference][INFO] - + Generation pass throughput: 56.50 (tokens/s)
+ [2023-08-13 19:15:31,771][inference][INFO] - Saving inference results
+ [2023-08-13 19:15:31,779][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,17002.528768,0.0321,125.0,6.28,127.0
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:15:32,284][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:15:32,285][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:15:32,477][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:15:32,477][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:15:32,477][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:15:32,580][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:15:32,619][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:15:32,620][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 19:15:43,987][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:15:43,989][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:15:52,515][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:15:52,564][memory_tracker][INFO] - Peak memory usage: 17002.528768 MB
+ [2023-08-13 19:15:52,565][inference][INFO] - + Forward pass peak memory: 17002.528768 (MB)
+ [2023-08-13 19:15:52,565][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:15:53,307][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:16:40,349][inference][INFO] - + Forward pass latency: 3.21e-02 (s)
+ [2023-08-13 19:16:40,350][inference][INFO] - + Forward pass throughput: 125.00 (samples/s)
+ [2023-08-13 19:16:40,351][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:16:47,605][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:17:12,734][inference][INFO] - + Generation pass latency: 6.28e+00 (s)
+ [2023-08-13 19:17:12,736][inference][INFO] - + Generation pass throughput: 127.00 (tokens/s)
+ [2023-08-13 19:17:12,736][inference][INFO] - Saving inference results
+ [2023-08-13 19:17:12,749][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,31481.266175999997,0.187,21.4,7.7,104.0
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:17:13,238][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:17:13,239][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:17:13,430][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:17:13,430][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:17:13,431][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:17:13,533][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:17:13,573][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:17:13,573][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 19:17:30,896][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:17:30,898][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:17:39,448][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:17:39,661][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
+ [2023-08-13 19:17:39,661][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
+ [2023-08-13 19:17:39,666][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:17:46,593][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:19:01,255][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+ [2023-08-13 19:19:01,257][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+ [2023-08-13 19:19:01,257][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:19:09,042][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:19:32,146][inference][INFO] - + Generation pass latency: 7.70e+00 (s)
+ [2023-08-13 19:19:32,148][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s)
+ [2023-08-13 19:19:32,148][inference][INFO] - Saving inference results
+ [2023-08-13 19:19:32,155][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,19498.139648,0.0989,162.0,6.46,495.0
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:19:32,708][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:19:32,710][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:19:32,903][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:19:32,903][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:19:32,903][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:19:33,005][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:19:33,042][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:19:33,043][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 19:19:44,510][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:19:44,512][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:19:53,130][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:19:53,241][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
+ [2023-08-13 19:19:53,241][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
+ [2023-08-13 19:19:53,241][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:19:55,898][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:20:50,042][inference][INFO] - + Forward pass latency: 9.89e-02 (s)
+ [2023-08-13 19:20:50,043][inference][INFO] - + Forward pass throughput: 162.00 (samples/s)
+ [2023-08-13 19:20:50,043][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:20:56,729][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:21:22,585][inference][INFO] - + Generation pass latency: 6.46e+00 (s)
+ [2023-08-13 19:21:22,586][inference][INFO] - + Generation pass throughput: 495.00 (tokens/s)
+ [2023-08-13 19:21:22,586][inference][INFO] - Saving inference results
+ [2023-08-13 19:21:22,593][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,35824.467968,0.683,23.4,13.0,246.0
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:21:23,174][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:21:23,175][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:21:23,357][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:21:23,358][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:21:23,358][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:21:23,462][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:21:23,499][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:21:23,500][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 19:21:40,982][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:21:40,983][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:21:49,497][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:21:50,216][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+ [2023-08-13 19:21:50,216][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+ [2023-08-13 19:21:50,232][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:22:15,578][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:23:33,502][inference][INFO] - + Forward pass latency: 6.83e-01 (s)
+ [2023-08-13 19:23:33,503][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+ [2023-08-13 19:23:33,503][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:23:47,161][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:24:13,131][inference][INFO] - + Generation pass latency: 1.30e+01 (s)
+ [2023-08-13 19:24:13,133][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
+ [2023-08-13 19:24:13,133][inference][INFO] - Saving inference results
+ [2023-08-13 19:24:13,141][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
+ 0,460.23065599999995,0.00324,309.0
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-13 19:24:17,896][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:24:17,897][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:24:18,071][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-13 19:24:18,071][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:24:18,071][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:24:18,071][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:24:18,073][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:24:18,073][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-13 19:24:18,666][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:24:18,667][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:24:18,804][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-13 19:24:18,806][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:24:18,860][inference][INFO] - + Forward pass peak memory: 460.23065599999995 (MB)
+ [2023-08-13 19:24:18,862][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-13 19:24:18,863][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:24:18,896][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:24:29,007][inference][INFO] - + Forward pass latency: 3.24e-03 (s)
+ [2023-08-13 19:24:29,009][inference][INFO] - + Forward pass throughput: 309.00 (samples/s)
+ [2023-08-13 19:24:29,009][inference][INFO] - Saving inference results
+ [2023-08-13 19:24:29,023][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_gpt2_inference
+ model: hf-internal-testing/tiny-random-gpt2
+ device: cpu
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,463.59756799999997,0.00336,298.0,0.481,208.0
raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/main.log ADDED
@@ -0,0 +1,22 @@
+ [2023-08-13 19:24:33,409][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:24:33,410][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:24:33,589][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-13 19:24:33,589][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:24:33,589][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:24:33,589][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:24:33,591][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:24:33,591][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-13 19:24:34,236][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:24:34,237][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:24:34,429][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:24:34,479][inference][INFO] - + Forward pass peak memory: 463.59756799999997 (MB)
+ [2023-08-13 19:24:34,480][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:24:34,514][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:24:44,616][inference][INFO] - + Forward pass latency: 3.36e-03 (s)
+ [2023-08-13 19:24:44,619][inference][INFO] - + Forward pass throughput: 298.00 (samples/s)
+ [2023-08-13 19:24:44,620][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:24:45,113][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:24:55,225][inference][INFO] - + Generation pass latency: 4.81e-01 (s)
+ [2023-08-13 19:24:55,225][inference][INFO] - + Generation pass throughput: 208.00 (tokens/s)
+ [2023-08-13 19:24:55,226][inference][INFO] - Saving inference results
+ [2023-08-13 19:24:55,239][backend][INFO] - Cleaning backend